from flask import Flask, jsonify, request, render_template
from src.webapp.webapps import *
app = Flask(__name__)
matchDF = get_match_df()
playerPerformanceObj = PlayerPerformance()
########### DropDown Lists ###########
seasonList = get_dropdown_list(matchDF,"season",1,"int")
lboundList = get_dropdown_list(matchDF,"season",1,"int")
uboundList = get_dropdown_list(matchDF,"season",1,"int")
year_list = get_dropdown_list(matchDF,"season",1,"string")
player_names = playerPerformanceObj.getPlayerNames()
teams = ['Rajasthan Royals','Chennai Super Kings','Deccan Chargers','Gujarat Lions','Delhi Daredevils','Mumbai Indians','Kochi Tuskers Kerala','Royal Challengers Bangalore','Pune Warriors','Rising Pune Supergiants','Sunrisers Hyderabad','Kolkata Knight Riders','Kings XI Punjab']
########### Index Page ###########
@app.route("/", methods=["GET"])
def index():
return render_template('index.html')
########### RESTful API Routing & Functionality ###########
@app.route("/overallStandings/help", methods=["GET"])
def returnOverallStandingsHelp():
return render_template("overallRanksHelp.html")
@app.route("/performanceConsistency/help", methods=["GET"])
def returnPerformanceConsistenciesHelp():
return render_template("performanceConsistencyHelp.html")
@app.route("/TeamVsTeamWinPercentage/help", methods=["GET"])
def returnTeamVsTeamWinPercentageHelp():
return render_template("teamVsTeamWinPercentageHelp.html")
@app.route("/PlayerPerformance/help", methods=["GET"])
def returnPlayerPerformanceHelp():
return render_template("playerperformanceHelp.html")
@app.route("/DreamTeam/help", methods=["GET"])
def returnDreamTeamHelp():
return render_template("dreamteamHelp.html")
@app.route("/overallStandings", methods=["GET"])
def returnOverallStandings():
args = request.args
season = args['season']
return jsonify({"Overall_Standings_"+season: overall_rank_jsonify(matchDF, season)})
@app.route("/performanceConsistency", methods=["GET"])
def returnPerformanceConsistencies():
args = request.args
lbound = int(args['lbound'])
ubound = int(args['ubound'])
return jsonify({"Performance_Consistency_"+str(lbound)+"_to_"+str(ubound): consistency_jsonify(matchDF, lbound, ubound)})
@app.route("/TeamVsTeamWinPercentage", methods=["GET"])
def returnTeamVsTeamWinPercentage():
args = request.args
team1 = args['team1']
team2 = args['team2']
return jsonify({"Team_Vs_Team_Win_Percentage_"+team1+"_VS_"+team2: team_vs_team_jsonify(matchDF, team1, team2)})
@app.route("/PlayerPerformance", methods=["GET"])
def returnPlayerPerformance():
args = request.args
player = args['player']
return jsonify({"Player_Performance_"+player: Player_Performance_jsonify(player)})
@app.route("/DreamTeam", methods=["GET"])
def returnDreamTeam():
args = request.args
season1 = args['season1']
season2 = args['season2']
return jsonify({"Dream_Team_"+str(season1)+"_"+str(season2): dream_team_jsonify(season1, season2)})
########### WebApp Routing & Functionality ###########
@app.route("/PlayerPerformance/webapp")
def returnPlayerPerformApp():
# Determine the selected feature
player1 = request.args.get("player1")
if player1 is None:
player1 = "V Kohli"
player2 = request.args.get("player2")
if player2 is None:
player2 = "Average"
# Create the plot
plot = playerPerformanceObj.create_figure_player_performance(player1, player2)
# Embed plot into HTML via Flask Render
return render_template("playerperformance.html", plot=plot, player_names=player_names, player1=player1, player2=player2)
@app.route("/seasonOverview/webapp")
def returnSeasonOverviewWebApp():
# Determine the selected feature
season = request.args.get("season")
if season is None:
season = 2013
else:
season = int(season)
# Create the plot
plot = create_figure_season_overview(matchDF, season)
# Embed plot into HTML via Flask Render
script, div = components(plot)
return render_template("seasonOverview.html",script=script,\
div=div, seasonList=seasonList, season=season)
@app.route("/overallStandings/webapp")
def returnOverallStandingsWebApp():
# Determine the selected feature
season = request.args.get("season")
if season is None:
season = 2013
else:
season = int(season)
# Create the plot
plot = create_figure_overall_ranks(matchDF, season)
# Embed plot into HTML via Flask Render
script, div = components(plot)
return render_template("overallRanks.html", script=script, div=div, seasonList=seasonList, season=season)
@app.route("/performanceConsistency/webapp")
def returnPerformanceConsistencyWebApp():
# Determine the selected feature
lbound = request.args.get("lbound")
ubound = request.args.get("ubound")
if lbound is None:
lbound = 2009
else:
lbound = int(lbound)
if ubound is None:
ubound = 2012
else:
ubound = int(ubound)
if lbound > ubound:
lbound, ubound = ubound, lbound
# Create the plot
plot = create_figure_performance_consistency(matchDF, lbound, ubound)
# Embed plot into HTML via Flask Render
# script, div = components(plot)
# return render_template("performanceConsistency.html",\
# script=script, div=div, lboundList=lboundList,\
# uboundList=uboundList, lbound=lbound,\
# ubound=ubound)
return render_template("performanceConsistency.html", plot=plot,\
lboundList=lboundList,uboundList=uboundList,\
lbound=lbound,ubound=ubound)
@app.route("/TeamVsTeamWinPercentage/webapp")
def returnTeamVsTeamWinPercentageApp():
# Determine the selected feature
flag=0
flag2=0
result=None
team1= request.args.get("team1")
if team1 is None:
team1 = "Rajasthan Royals"
team2= request.args.get("team2")
if team2 is None:
team2 = "Delhi Daredevils"
if team1 == team2:
flag=1
else:
result = create_figure_team_vs_team_win_percentage(matchDF,team1,team2)
if result is None:
flag2=1
return render_template("teamVsTeamWinPercentage.html",result=result,team1=team1,team2=team2,teams=teams,flag=flag,flag2=flag2)
@app.route('/DreamTeam/webapp')
def returnDreamTeamWebApp():
lbound= request.args.get("lbound")
ubound= request.args.get("ubound")
if lbound is None:
lbound = "2008"
if ubound is None:
ubound = "2016"
lseason = int(lbound)
useason = int(ubound)
if lseason > useason:
lseason, useason = useason, lseason
lbound = str(lseason)
ubound = str(useason)
plot = create_team(lbound,ubound)
script, div = components(plot)
return render_template("dreamteam.html", script=script, div=div, year_list=year_list,lbound=lbound,ubound=ubound)
if __name__ == "__main__":
app.run(port=5000)
#-------------------------------------------------------------------------------
# bob: parser.py
#
# Parser for a subset of Scheme. Follows the grammar defined in R5RS
# 7.1.2 (External Representations).
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from .lexer import Lexer, Token, LexerError
from .expr import Pair, Number, Symbol, Boolean
class ParseError(Exception): pass
class BobParser(object):
""" Recursive-descent parser.
Since Scheme code is also data, this parser mimics the (read) procedure
and reads source code into Scheme expressions (internal data
representation suitable for further analysis).
"""
def __init__(self):
self.lexer = BobLexer()
self.clear()
def parse(self, text):
""" Given a string with Scheme source code, parses it into a list of
expression objects.
"""
self.text = text
self.lexer.input(self.text)
self._next_token()
return self._parse_file()
def clear(self):
""" Reset the parser's internal state.
"""
self.text = ''
self.cur_token = None
def pos2coord(self, pos):
""" Convert a lexing position (offset from start of text) into a
coordinate [line %s, column %s].
"""
# Count the amount of newlines between the beginning of the parsed
# text and pos. Then, count the column as an offset from the last
# newline
#
num_newlines = self.text.count('\n', 0, pos)
line_offset = self.text.rfind('\n', 0, pos)
if line_offset < 0:
line_offset = 0
return '[line %s, column %s]' % (num_newlines + 1, pos - line_offset)
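# Worked example (illustrative, not executed): for text "ab\ncd" and pos == 4
# (the character 'd'), count('\n', 0, 4) is 1 so the line is 2, rfind('\n', 0, 4)
# is 2 so the column is 4 - 2 = 2, and the result is '[line 2, column 2]'.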
######################-- PRIVATE --######################
def _parse_error(self, msg, token=None):
token = token or self.cur_token
if token:
coord = self.pos2coord(token.pos)
raise ParseError('%s %s' % (msg, coord))
else:
raise ParseError(msg)
def _next_token(self):
try:
while True:
self.cur_token = self.lexer.token()
if self.cur_token is None or self.cur_token.type != 'COMMENT':
break
except LexerError as lexerr:
raise ParseError('syntax error at %s' % self.pos2coord(lexerr.pos))
def _match(self, type):
""" The 'match' primitive of RD parsers.
* Verifies that the current token is of the given type
* Returns the value of the current token
* Reads in the next token
"""
if self.cur_token.type == type:
val = self.cur_token.val
self._next_token()
return val
else:
self._parse_error('Unmatched %s (found %s)' % (type, self.cur_token.type))
##
## Recursive parsing rules. The top-level is _parse_file, which expects
## a sequence of Scheme expressions. The rest of the rules follow section
## 7.1.2 of R5RS with some re-ordering for programming convenience.
##
def _parse_file(self):
datum_list = []
while self.cur_token:
datum_list.append(self._datum())
return datum_list
def _datum(self):
# list
if self.cur_token.type == 'LPAREN':
return self._list()
# abbreviation
elif self.cur_token.type == 'QUOTE':
return self._abbreviation()
# simple datum
else:
return self._simple_datum()
def _simple_datum(self):
if self.cur_token.type == 'BOOLEAN':
retval = Boolean(self.cur_token.val == '#t')
elif self.cur_token.type == 'NUMBER':
base = 10
num_str = self.cur_token.val
if num_str.startswith('#'):
if num_str[1] == 'x': base = 16
elif num_str[1] == 'o': base = 8
elif num_str[1] == 'b': base = 2
num_str = num_str[2:]
try:
retval = Number(int(num_str, base))
except ValueError as err:
self._parse_error('Invalid number')
elif self.cur_token.type == 'ID':
retval = Symbol(self.cur_token.val)
else:
self._parse_error('Unexpected token "%s"' % self.cur_token.val)
self._next_token()
return retval
def _list(self):
# Algorithm:
#
# 1. First parse all sub-datums into a sequential Python list.
# 2. Convert this list into nested Pair objects
#
# To handle the dot ('.'), dot_idx keeps track of the index in lst
# where the dot was specified.
#
self._match('LPAREN')
lst = []
dot_idx = -1
while True:
if not self.cur_token:
self._parse_error('Unmatched parentheses at end of input')
elif self.cur_token.type == 'RPAREN':
break
elif self.cur_token.type == 'ID' and self.cur_token.val == '.':
if dot_idx > 0:
self._parse_error('Invalid usage of "."')
dot_idx = len(lst)
self._match('ID')
else:
lst.append(self._datum())
# Figure out whether we have a dotted list and whether the dot was
# placed correctly
#
dotted_end = False
if dot_idx > 0:
if dot_idx == len(lst) - 1:
dotted_end = True
else:
self._parse_error('Invalid location for "." in list')
self._match('RPAREN')
if dotted_end:
cur_cdr = lst[-1]
lst = lst[:-1]
else:
cur_cdr = None
for datum in reversed(lst):
cur_cdr = Pair(datum, cur_cdr)
return cur_cdr
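# Illustrative example (not executed here): for the input "(1 2 . 3)" the loop
# above collects lst == [Number(1), Number(2), Number(3)] with dot_idx == 2, so
# dotted_end is True, cur_cdr starts as Number(3), and the fold produces
# Pair(Number(1), Pair(Number(2), Number(3))). A proper list such as "(1 2)"
# instead ends in None: Pair(Number(1), Pair(Number(2), None)).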
def _abbreviation(self):
quotepos = self.cur_token.pos
self._match('QUOTE')
datum = self._datum()
return Pair(Symbol('quote'), Pair(datum, None))
class BobLexer(Lexer):
""" Partial Scheme lexer based on R5RS 7.1.1 (Lexical structure).
"""
def __init__(self):
rules = self._lexing_rules()
super(BobLexer, self).__init__(rules, skip_whitespace=True)
def _lexing_rules(self):
# Regex helpers
#
digit_2 = r'[0-1]'
digit_8 = r'[0-7]'
digit_10 = r'[0-9]'
digit_16 = r'[0-9A-Fa-f]'
radix_2 = r'\#b'
radix_8 = r'\#o'
radix_10 = r'(\#d)?'
radix_16 = r'\#x'
number = r'(%s%s+|%s%s+|%s%s+|%s%s+)' %(
radix_2, digit_2,
radix_8, digit_8,
radix_10, digit_10,
radix_16, digit_16,)
special_initial = r'[!$%&*.:<=>?^_~]'
initial = '([a-zA-Z]|'+special_initial+')'
special_subsequent = r'[+-.@]'
subsequent = '(%s|%s|%s)' % (initial, digit_10, special_subsequent)
peculiar_identifier = r'([+\-.]|\.\.\.)'
identifier = '(%s%s*|%s)' % (initial, subsequent, peculiar_identifier)
rules = [
(r';[^\n]*', 'COMMENT'),
(r'\#[tf]', 'BOOLEAN'),
(number, 'NUMBER'),
(identifier, 'ID'),
(r'\(', 'LPAREN'),
(r'\)', 'RPAREN'),
(r'\'', 'QUOTE'),
]
return rules
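# Illustrative token stream (a sketch, assuming the base Lexer applies the rules
# above in order and skips whitespace): the input "(define x 42)" lexes to
# LPAREN, ID 'define', ID 'x', NUMBER '42', RPAREN, while "; a comment" yields a
# single COMMENT token, which the parser discards in _next_token().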
#-------------------------------------------------------------------------------
if __name__ == '__main__':
pass
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._reports_operations import build_list_by_api_request, build_list_by_geo_request, build_list_by_operation_request, build_list_by_product_request, build_list_by_request_request, build_list_by_subscription_request, build_list_by_time_request, build_list_by_user_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReportsOperations:
"""ReportsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~api_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_api(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_api_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_api.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_api_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byApi'} # type: ignore
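# Hedged usage sketch (not part of the generated client): the paged operations
# in this class are normally consumed with ``async for`` over the returned
# AsyncItemPaged. The client variable and its ``reports`` attribute below are
# assumptions for illustration, as are the resource names and the filter string.
#
#   async for record in client.reports.list_by_api(
#           resource_group_name="my-rg",
#           service_name="my-apim",
#           filter="timestamp ge datetime'2022-01-01T00:00:00'"):
#       print(record)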
@distributed_trace
def list_by_user(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by User.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| displayName | select, orderBy | | | </br>| userId |
select, filter | eq | | </br>| apiRegion | filter | eq | | </br>| productId | filter |
eq | | </br>| subscriptionId | filter | eq | | </br>| apiId | filter | eq | |
</br>| operationId | filter | eq | | </br>| callCountSuccess | select, orderBy | |
| </br>| callCountBlocked | select, orderBy | | | </br>| callCountFailed | select,
orderBy | | | </br>| callCountOther | select, orderBy | | | </br>|
callCountTotal | select, orderBy | | | </br>| bandwidth | select, orderBy | | |
</br>| cacheHitsCount | select | | | </br>| cacheMissCount | select | | |
</br>| apiTimeAvg | select, orderBy | | | </br>| apiTimeMin | select | | |
</br>| apiTimeMax | select | | | </br>| serviceTimeAvg | select | | | </br>|
serviceTimeMin | select | | | </br>| serviceTimeMax | select | | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_user_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_user.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_user_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_user.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byUser'} # type: ignore
@distributed_trace
def list_by_operation(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by API Operations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| displayName | select, orderBy | | | </br>| apiRegion |
filter | eq | | </br>| userId | filter | eq | | </br>| productId | filter | eq | |
</br>| subscriptionId | filter | eq | | </br>| apiId | filter | eq | | </br>|
operationId | select, filter | eq | | </br>| callCountSuccess | select, orderBy | |
| </br>| callCountBlocked | select, orderBy | | | </br>| callCountFailed | select,
orderBy | | | </br>| callCountOther | select, orderBy | | | </br>|
callCountTotal | select, orderBy | | | </br>| bandwidth | select, orderBy | | |
</br>| cacheHitsCount | select | | | </br>| cacheMissCount | select | | |
</br>| apiTimeAvg | select, orderBy | | | </br>| apiTimeMin | select | | |
</br>| apiTimeMax | select | | | </br>| serviceTimeAvg | select | | | </br>|
serviceTimeMin | select | | | </br>| serviceTimeMax | select | | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_operation_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_operation.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_operation_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byOperation'} # type: ignore
@distributed_trace
def list_by_product(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| displayName | select, orderBy | | | </br>| apiRegion |
filter | eq | | </br>| userId | filter | eq | | </br>| productId | select, filter | eq
| | </br>| subscriptionId | filter | eq | | </br>| callCountSuccess | select, orderBy |
| | </br>| callCountBlocked | select, orderBy | | | </br>| callCountFailed |
select, orderBy | | | </br>| callCountOther | select, orderBy | | | </br>|
callCountTotal | select, orderBy | | | </br>| bandwidth | select, orderBy | | |
</br>| cacheHitsCount | select | | | </br>| cacheMissCount | select | | |
</br>| apiTimeAvg | select, orderBy | | | </br>| apiTimeMin | select | | |
</br>| apiTimeMax | select | | | </br>| serviceTimeAvg | select | | | </br>|
serviceTimeMin | select | | | </br>| serviceTimeMax | select | | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_product_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_product.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_product_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byProduct'} # type: ignore
@distributed_trace
def list_by_geo(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by geography.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| country | select | | | </br>| region | select | |
| </br>| zip | select | | | </br>| apiRegion | filter | eq | | </br>| userId |
filter | eq | | </br>| productId | filter | eq | | </br>| subscriptionId | filter | eq
| | </br>| apiId | filter | eq | | </br>| operationId | filter | eq | | </br>|
callCountSuccess | select | | | </br>| callCountBlocked | select | | | </br>|
callCountFailed | select | | | </br>| callCountOther | select | | | </br>|
bandwidth | select, orderBy | | | </br>| cacheHitsCount | select | | | </br>|
cacheMissCount | select | | | </br>| apiTimeAvg | select | | | </br>|
apiTimeMin | select | | | </br>| apiTimeMax | select | | | </br>|
serviceTimeAvg | select | | | </br>| serviceTimeMin | select | | | </br>|
serviceTimeMax | select | | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_geo_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=self.list_by_geo.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_geo_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_geo.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byGeo'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| displayName | select, orderBy | | | </br>| apiRegion |
filter | eq | | </br>| userId | select, filter | eq | | </br>| productId | select,
filter | eq | | </br>| subscriptionId | select, filter | eq | | </br>| callCountSuccess
| select, orderBy | | | </br>| callCountBlocked | select, orderBy | | | </br>|
callCountFailed | select, orderBy | | | </br>| callCountOther | select, orderBy | |
| </br>| callCountTotal | select, orderBy | | | </br>| bandwidth | select, orderBy |
| | </br>| cacheHitsCount | select | | | </br>| cacheMissCount | select | |
| </br>| apiTimeAvg | select, orderBy | | | </br>| apiTimeMin | select | | |
</br>| apiTimeMax | select | | | </br>| serviceTimeAvg | select | | | </br>|
serviceTimeMin | select | | | </br>| serviceTimeMax | select | | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/bySubscription'} # type: ignore
@distributed_trace
def list_by_time(
self,
resource_group_name: str,
service_name: str,
filter: str,
interval: datetime.timedelta,
top: Optional[int] = None,
skip: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ReportCollection"]:
"""Lists report records by Time.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter, select | ge, le | | </br>| interval | select | | | </br>| apiRegion |
filter | eq | | </br>| userId | filter | eq | | </br>| productId | filter | eq | |
</br>| subscriptionId | filter | eq | | </br>| apiId | filter | eq | | </br>|
operationId | filter | eq | | </br>| callCountSuccess | select | | | </br>|
callCountBlocked | select | | | </br>| callCountFailed | select | | | </br>|
callCountOther | select | | | </br>| bandwidth | select, orderBy | | | </br>|
cacheHitsCount | select | | | </br>| cacheMissCount | select | | | </br>|
apiTimeAvg | select | | | </br>| apiTimeMin | select | | | </br>| apiTimeMax |
select | | | </br>| serviceTimeAvg | select | | | </br>| serviceTimeMin |
select | | | </br>| serviceTimeMax | select | | | </br>.
:type filter: str
:param interval: By time interval. Interval must be multiple of 15 minutes and may not be zero.
The value should be in ISO 8601 format (http://en.wikipedia.org/wiki/ISO_8601#Durations). This
code can be used to convert TimeSpan to a valid interval string: XmlConvert.ToString(new
TimeSpan(hours, minutes, seconds)).
:type interval: ~datetime.timedelta
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param orderby: OData order by query option.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReportCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.ReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_time_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
interval=interval,
top=top,
skip=skip,
orderby=orderby,
template_url=self.list_by_time.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_time_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
interval=interval,
top=top,
skip=skip,
orderby=orderby,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_time.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byTime'} # type: ignore
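# Hedged note on list_by_time's ``interval`` parameter (values illustrative): it
# is a datetime.timedelta that, per the docstring above, must be a non-zero
# multiple of 15 minutes; expressed as an ISO 8601 duration, the smallest
# allowed bucket is "PT15M".
#
#   interval = datetime.timedelta(minutes=15)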
@distributed_trace
def list_by_request(
self,
resource_group_name: str,
service_name: str,
filter: str,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.RequestReportCollection"]:
"""Lists report records by Request.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| timestamp |
filter | ge, le | | </br>| apiId | filter | eq | | </br>| operationId | filter | eq |
| </br>| productId | filter | eq | | </br>| userId | filter | eq | | </br>| apiRegion |
filter | eq | | </br>| subscriptionId | filter | eq | | </br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RequestReportCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.RequestReportCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RequestReportCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_request_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=self.list_by_request.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_request_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("RequestReportCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_request.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/reports/byRequest'} # type: ignore
import os, sys, hashlib, re, gzip, random
from os.path import isdir, basename
# We use forward slashes for paths, even on Windows,
# for consistency with URLs.
# For now, still use os.path.join instead of posixpath.join
# in case we're relying on correct behavior with absolute
# Windows paths. Also consider integrating this with
# urljoin using heuristics?
def join(path, *paths):
return os.path.join(path, *paths).replace('\\', '/')
def normpath(path):
return os.path.normpath(path).replace('\\', '/')
def abspath(path):
return os.path.abspath(path).replace('\\', '/')
def relpath(path, start=os.curdir):
return os.path.relpath(path, start).replace('\\', '/')
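# Illustrative example (assuming Windows, where os.path.join inserts backslashes):
#   join('static', 'css', 'site.css')  ->  'static/css/site.css'
# On POSIX the wrappers mostly pass through unchanged, except that literal
# backslashes in the arguments are also rewritten to forward slashes.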
def subPrematchedText(matches, replacement, originalText):
"""
Returns: originalText with matches replaced by replacements.
matches: iterable collection of re.Match objects
replacement: function or template-string as in re.sub()
originalText: the exact string that the Matches were created from
(exact since string indexing by Match.begin()/end() is used).
Works on all-bytes or all-str objects equally well.
"""
if isinstance(replacement, (str, bytes)):
replacementTemplate = replacement
replacement = lambda match: match.expand(replacementTemplate)
resultBuilder = []
leftOffAtInFileIdx = 0
for match in matches:
resultBuilder.append(originalText[leftOffAtInFileIdx : match.start()])
resultBuilder.append(replacement(match))
leftOffAtInFileIdx = match.end()
resultBuilder.append(originalText[leftOffAtInFileIdx:])
return type(originalText)().join(resultBuilder)
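# Usage sketch (illustrative values): the matches must come from exactly the
# text being edited, e.g. via re.finditer over originalText.
#   text = 'a1b22'
#   subPrematchedText(re.finditer(r'\d+', text), 'N', text)   # -> 'aNbN'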
def popiter(collection):
try:
while True:
yield collection.pop()
except (IndexError, KeyError):
pass
def make_transitive(relation, always_include_base_case = False, multiple_base_cases = False):
"""
relation: a function from a single object to an iterable set[1] of objects[2]
returns : a function from a single object[4] to an iterable set[3] of objects
always_include_base_case: if True, the returned function's returned set
will include the returned function's argument even if repeating the function
on the argument never produces the argument.
The returned function applies the relation not just once, but repeatedly to
all of its returned objects. It collects the set of objects that have been
seen in the results until no new objects appear.
make_transitive could equivalently be a two-argument function,
but something seemed elegant about it being an endomorphism.
examples:
modulo:
>>> sorted(make_transitive(lambda x: [(x + 1) % 3])(7))
[0, 1, 2]
substrings:
>>> sorted(make_transitive(lambda s: [s[1:], s[:-1]])('abcd'))
['', 'a', 'ab', 'abc', 'b', 'bc', 'bcd', 'c', 'cd', 'd']
>>> sorted(make_transitive(lambda s: [s[1:], s[:-1]], True)('abcd'))
['', 'a', 'ab', 'abc', 'abcd', 'b', 'bc', 'bcd', 'c', 'cd', 'd']
[1] any iterable will do; duplicates will be ignored
[2] these objects must be hashable
[3] not actually type 'set', but contains no duplicates
[4] or a collection of objects, if multiple_base_cases is True
"""
def ret(initial):
deps = set()
if multiple_base_cases:
newdeps = (list(set(initial)) if always_include_base_case
else list(set().union(*(relation(i) for i in set(initial)))))
else:
newdeps = [initial] if always_include_base_case else list(relation(initial))
for newdep in popiter(newdeps):
if newdep not in deps:
yield newdep; deps.add(newdep)
newdeps.extend(relation(newdep))
return ret
characters_that_are_easy_to_read_and_type = '23456789abcdefghijkmnpqrstuvwxyz'
alphanumeric_characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
def alnum_secret(length = 22):
"""
The default length of 22 gives at least 128 bits of entropy
(entropy = log2(62**length))
"""
rng = random.SystemRandom()
return ''.join(rng.choice(alphanumeric_characters) for _ in range(length))
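# Sanity check of the entropy claim above: each character contributes
# log2(62) ~= 5.95 bits, so 22 characters give roughly 22 * 5.95 ~= 131 bits,
# which is indeed at least 128.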
def sha384file(path):
"""
Returns a hashlib hash object giving the sha384 of the argument
file's contents.
Hashes directory contents as a sequence of NUL-character-terminated
directory entries (doesn't use the contents of those files, just
their names in non-localized string order (which is alphabetical-ish)).
Why SHA-384? It is the best function available in hashlib. (As of this
writing, SHA-3 isn't in hashlib: August 2015 / Python 3.4.)
sha224 and sha384 are less vulnerable to length extension attacks than
the others, and don't have many corresponding downsides. sha384 is
faster on 64-bit computers (which I develop on), and has more result bits
(for the unlikely chance that matters), so use sha384.
"""
# http://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
h = hashlib.sha384()
if isdir(path):
h.update(b''.join(p.encode()+b'\0' for p in sorted(os.listdir(path))))
else:
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(2**20), b''):
h.update(chunk)
return h
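# Usage sketch (hypothetical path): the function returns a hashlib object, so
# the digest still has to be extracted explicitly.
#   digest_hex = sha384file('build/artifact.tar.gz').hexdigest()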
def file_re_sub(infile, outfile, *sub_args, **sub_kwargs):
"""
Calls re.sub(...) on the contents of infile and writes it to outfile.
The arguments to sub are passed after infile and outfile.
The file is opened in text or binary mode based on whether the
pattern is text or binary.
"""
pattern = sub_args[0] if len(sub_args) > 0 else sub_kwargs['pattern']
pattern = pattern if isinstance(pattern, (str, bytes)) else pattern.pattern
isbinary = isinstance(pattern, bytes)
old_contents = (read_file_binary if isbinary else read_file_text)(infile)
new_contents = re.sub(string=old_contents, *sub_args, **sub_kwargs)
(write_file_binary if isbinary else write_file_text)(outfile, new_contents)
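# Usage sketch (hypothetical file names and pattern): rewrite a placeholder in a
# template; text vs. binary mode is inferred from the pattern type (str here).
#   file_re_sub('page.template.html', 'page.html', r'\{\{VERSION\}\}', '1.2.3')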
def gzip_omitting_metadata(infile, outfile):
"""
Reads infile, gzips it at the maximum compression level, and writes
the gzipped version to outfile.
The gzip metadata 'filename' is left empty and 'mtime' is set to 0
in Unix time (which is 1970). An example situation you'd want this:
For gzip-encoded data sent with "Content-Encoding: gzip",
this metadata goes unused, and possibly wastes bandwidth or
leaks information that wasn't intended to be published
(most likely unimportant information, admittedly).
"""
with open(infile, 'rb') as f_in:
with open(outfile, 'wb') as f_out:
with gzip.GzipFile(filename='', fileobj=f_out, mtime=0, compresslevel=9) as f_gzip:
#per python docs http://docs.python.org/3/library/gzip.html
f_gzip.writelines(f_in)
def read_file_text(path):
with open(path, 'r', encoding='utf-8') as f:
return f.read()
def read_file_binary(path):
with open(path, 'rb') as f:
return f.read()
def write_file_text(path, data):
with open(path, 'w', encoding='utf-8') as f:
return f.write(data)
def write_file_binary(path, data):
with open(path, 'wb') as f:
return f.write(data)
def write_stdout_binary(data):
try: #python3
sys.stdout.buffer.write(data)
except AttributeError: #python2
sys.stdout.write(data)
class pushd(object):
def __init__(self, target):
self.target = target
def __enter__(self):
self.source = os.getcwd()
os.chdir(self.target)
def __exit__(self, type, value, traceback):
os.chdir(self.source)
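# Usage sketch (hypothetical directory): temporarily change the working
# directory and restore it on exit, even if the body raises.
#   with pushd('build/output'):
#       write_file_text('index.html', '<!doctype html>')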
def files_under(rootpath):
for dirpath, dirnames, filenames in os.walk(rootpath):
for f in filenames:
yield join(dirpath, f)
def relpath_files_under(rootpath):
for dirpath, dirnames, filenames in os.walk(rootpath):
for f in filenames:
yield relpath(join(dirpath, f), rootpath)
default_vcs_dirs = {'.git', '.hg', '.bzr', '.svn', '__pycache__'}
def relpath_files_under_excluding_vcs_etc(rootpath, vcs_dirs = default_vcs_dirs):
for dirpath, dirnames, filenames in os.walk(rootpath):
if basename(dirpath) in vcs_dirs:
dirnames[:] = []
else:
for f in filenames:
yield relpath(join(dirpath, f), rootpath)
def relpath_dirs_under(rootpath):
for dirpath, dirnames, filenames in os.walk(rootpath):
yield relpath(dirpath, rootpath)
def relpath_dirs_under_excluding_vcs_etc(rootpath, vcs_dirs = default_vcs_dirs):
for dirpath, dirnames, filenames in os.walk(rootpath):
if basename(dirpath) in vcs_dirs:
dirnames[:] = []
else:
yield relpath(dirpath, rootpath)
# for python2/3 doctest compatibility, and general doctest readability,
# use 'testprint'
class Shown(object):
def __init__(self, string):
self.string = string
def __str__(self):
return self.string
def __repr__(self):
return self.string
def destrbytes(data):
if isinstance(data, list):
return [destrbytes(d) for d in data]
if isinstance(data, tuple):
return tuple(destrbytes(d) for d in data)
if isinstance(data, bytes):
return Shown(data.decode('utf-8'))
if isinstance(data, str):
return Shown(data)
else:
raise "implement this"
def testprint(data):
print(destrbytes(data))
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"]
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
# Use this to run several variants of the tests.
VARIANT_FLAGS = {
"default": [],
"stress": ["--stress-opt", "--always-opt"],
"nocrankshaft": ["--nocrankshaft"]}
VARIANTS = ["default", "stress", "nocrankshaft"]
MODE_FLAGS = {
"debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap"],
"release" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants"]}
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
"mipsel",
"nacl_ia32",
"nacl_x64",
"x64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
"mipsel",
"nacl_ia32",
"nacl_x64"]
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="ia32,x64,arm")
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="dontcare")
result.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
result.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
result.add_option("--no-i18n", "--noi18n",
help="Skip internationalization tests",
default=False, action="store_true")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
default=(utils.GuessOS() != "linux"),
dest="no_network", action="store_true")
result.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks',
default=False, dest="no_presubmit", action="store_true")
result.add_option("--no-stress", "--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, dest="no_stress", action="store_true")
result.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
result.add_option("--variants",
help="Comma-separated list of testing variants")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow/flaky tests)"))
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("--junitout", help="File name of the JUnit output")
result.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
return result
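# Typical invocation (illustrative; assumes the script lives in tools/, next
# to tools/presubmit.py, as the workspace computation in Main() suggests):
#   tools/run-tests.py --arch=x64 --mode=release --progress=color -j8 mjsunit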
def ProcessOptions(options):
global VARIANT_FLAGS
global VARIANTS
# Architecture and mode related stuff.
if options.arch_and_mode:
options.arch_and_mode = [arch_and_mode.split(".")
for arch_and_mode in options.arch_and_mode.split(",")]
options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
if not mode.lower() in ["debug", "release", "optdebug"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
options.arch = ARCH_GUESS
options.arch = options.arch.split(",")
for arch in options.arch:
if not arch in SUPPORTED_ARCHS:
print "Unknown architecture %s" % arch
return False
# Store the final configuration in arch_and_mode list. Don't overwrite
# predefined arch_and_mode since it is more expressive than arch and mode.
if not options.arch_and_mode:
options.arch_and_mode = itertools.product(options.arch, options.mode)
# Special processing of other options, sorted alphabetically.
if options.buildbot:
# Buildbots run presubmit tests as a separate step.
options.no_presubmit = True
options.no_network = True
if options.command_prefix:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
options.no_network = True
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
if not excl(options.no_stress, options.stress_only, options.no_variants,
bool(options.variants), options.quickcheck):
print("Use only one of --no-stress, --stress-only, --no-variants, "
"--variants, or --quickcheck.")
return False
if options.no_stress:
VARIANTS = ["default", "nocrankshaft"]
if options.no_variants:
VARIANTS = ["default"]
if options.stress_only:
VARIANTS = ["stress"]
if options.variants:
VARIANTS = options.variants.split(",")
if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
return False
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.flaky_tests = "skip"
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
options.shell_dir = os.path.dirname(options.shell)
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
def CheckTestMode(name, option):
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
return False
return True
if not CheckTestMode("flaky test", options.flaky_tests):
return False
if not CheckTestMode("slow test", options.slow_tests):
return False
if not CheckTestMode("pass|fail test", options.pass_fail_tests):
return False
if not options.no_i18n:
DEFAULT_TESTS.append("intl")
return True
def ShardTests(tests, shard_count, shard_run):
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
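# Illustrative example: tests are dealt out round-robin, so with shard_count=3
# and shard_run=2 every test whose position i satisfies i % 3 == 1 is kept,
# i.e. ShardTests(range(10), 3, 2) -> [1, 4, 7].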
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
exit_code = 0
workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
if not options.no_presubmit:
print ">>> running presubmit tests"
code = subprocess.call(
[sys.executable, join(workspace, "tools", "presubmit.py")])
exit_code = code
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
if len(args) == 0:
suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
else:
args_suites = set()
for arg in args:
suite = arg.split(os.path.sep)[0]
if not suite in args_suites:
args_suites.add(suite)
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suites.append(suite)
if options.download_data:
for s in suites:
s.DownloadData()
for (arch, mode) in options.arch_and_mode:
try:
code = Execute(arch, mode, args, options, suites, workspace)
except KeyboardInterrupt:
return 2
exit_code = exit_code or code
return exit_code
def Execute(arch, mode, args, options, suites, workspace):
print(">>> Running tests for %s.%s" % (arch, mode))
shell_dir = options.shell_dir
if not shell_dir:
if options.buildbot:
shell_dir = os.path.join(workspace, options.outdir, mode)
mode = mode.lower()
else:
shell_dir = os.path.join(workspace, options.outdir,
"%s.%s" % (arch, mode))
shell_dir = os.path.relpath(shell_dir)
if mode == "optdebug":
mode = "debug" # "optdebug" is just an alias.
# Populate context object.
mode_flags = MODE_FLAGS[mode]
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
if arch in SLOW_ARCHS:
timeout = 2 * TIMEOUT_DEFAULT
else:
timeout = TIMEOUT_DEFAULT
timeout *= TIMEOUT_SCALEFACTOR[mode]
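# Worked example: a simulated architecture such as "arm" in debug mode gets
# 2 * 60 * 4 = 480 seconds per test, while x64 release keeps the plain
# 60-second default.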
ctx = context.Context(arch, mode, shell_dir,
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
options.extra_flags,
options.no_i18n)
# Find available test suites and read test cases from them.
variables = {
"mode": mode,
"arch": arch,
"system": utils.GuessOS(),
"isolates": options.isolates,
"deopt_fuzzer": False,
"no_i18n": options.no_i18n,
}
all_tests = []
num_tests = 0
test_id = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
s.tests = [ t.CopyAddingFlags(v)
for t in s.tests
for v in s.VariantFlags(t, variant_flags) ]
s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
test_id += 1
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
if num_tests == 0:
print "No tests to run."
return 0
# Run the tests, either locally or distributed on the network.
try:
start_time = time.time()
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
if options.junitout:
progress_indicator = progress.JUnitTestProgressIndicator(
progress_indicator, options.junitout, options.junittestsuite)
run_networked = not options.no_network
if not run_networked:
print("Network distribution disabled, running tests locally.")
elif utils.GuessOS() != "linux":
print("Network distribution is only supported on Linux, sorry!")
run_networked = False
peers = []
if run_networked:
peers = network_execution.GetPeers()
if not peers:
print("No connection to distribution server; running tests locally.")
run_networked = False
elif len(peers) == 1:
print("No other peers on the network; running tests locally.")
run_networked = False
elif num_tests <= 100:
print("Less than 100 tests, running them locally.")
run_networked = False
if run_networked:
runner = network_execution.NetworkedRunner(suites, progress_indicator,
ctx, peers, workspace)
else:
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
if runner.terminate:
return exit_code
overall_duration = time.time() - start_time
except KeyboardInterrupt:
raise
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
return exit_code
if __name__ == "__main__":
sys.exit(Main())
|
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary size on the supported cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, OpenStack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V2connectorsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
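# Minimal usage sketch (assumes the package-level Configuration already
# carries the Cloudbreak host and token; names other than the two methods
# below are illustrative):
#
#   api = V2connectorsApi()                          # shared ApiClient
#   regions = api.get_regions_by_credential_id(body=request_json)
#   vm_types = api.get_vm_types_by_credential_id(body=request_json)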
def get_regions_by_credential_id(self, **kwargs):
"""
retrieve regions by type
Each cloud provider has its own specific resources, like instance types and disk types. These endpoints collect them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_regions_by_credential_id(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PlatformResourceRequestJson body:
:return: RegionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_regions_by_credential_id_with_http_info(**kwargs)
else:
(data) = self.get_regions_by_credential_id_with_http_info(**kwargs)
return data
def get_regions_by_credential_id_with_http_info(self, **kwargs):
"""
retrieve regions by type
Each cloud provider has its own specific resources, like instance types and disk types. These endpoints collect them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_regions_by_credential_id_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PlatformResourceRequestJson body:
:return: RegionResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_regions_by_credential_id" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/connectors/regions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RegionResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_vm_types_by_credential_id(self, **kwargs):
"""
retrieve vmtype properties by credential
Each cloud provider has its own specific resources, like instance types and disk types. These endpoints collect them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_vm_types_by_credential_id(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PlatformResourceRequestJson body:
:return: PlatformVmtypesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_vm_types_by_credential_id_with_http_info(**kwargs)
else:
(data) = self.get_vm_types_by_credential_id_with_http_info(**kwargs)
return data
def get_vm_types_by_credential_id_with_http_info(self, **kwargs):
"""
retrieve vmtype properties by credential
Each cloud provider has its own specific resources, like instance types and disk types. These endpoints collect them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_vm_types_by_credential_id_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PlatformResourceRequestJson body:
:return: PlatformVmtypesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_vm_types_by_credential_id" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/connectors/vmtypes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformVmtypesResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
|
__author__ = 'mramire8'
__copyright__ = "Copyright 2014, ML Lab"
__version__ = "0.1"
__status__ = "Research"
import sys
import os
sys.path.append(os.path.abspath("."))
sys.path.append(os.path.abspath("../"))
sys.path.append(os.path.abspath("../experiment/"))
from experiment_utils import *
import argparse
import numpy as np
from sklearn.datasets.base import Bunch
from datautil.load_data import load_from_file
from sklearn import linear_model
import time
from sklearn import metrics
from collections import defaultdict
from strategy import structured
from expert import baseexpert
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import random
import nltk
import re
from scipy.sparse import diags
############# COMMAND LINE PARAMETERS ##################
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
metavar='TRAIN',
default="imdb",
help='training data (libSVM format)')
ap.add_argument('--neutral-threshold',
metavar='NEUTRAL',
type=float,
default=.4,
help='neutrality threshold of uncertainty')
ap.add_argument('--expert-penalty',
metavar='EXPERT_PENALTY',
type=float,
default=0.1,
help='Expert penalty value for the classifier simulation')
ap.add_argument('--trials',
metavar='TRIALS',
type=int,
default=5,
help='number of trials')
ap.add_argument('--folds',
metavar='FOLDS',
type=int,
default=1,
help='number of folds')
ap.add_argument('--budget',
metavar='BUDGET',
type=int,
default=200,
help='budget')
ap.add_argument('--step-size',
metavar='STEP_SIZE',
type=int,
default=10,
help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
metavar='BOOTSTRAP',
type=int,
default=50,
help='size of the initial labeled dataset')
ap.add_argument('--cost-function',
metavar='COST_FUNCTION',
type=str,
default="uniform",
help='cost function of the x-axis [uniform|log|linear|direct]')
ap.add_argument('--cost-model',
metavar='COST_MODEL',
type=str,
default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
help='cost function parameters of the cost function')
ap.add_argument('--maxiter',
metavar='MAXITER',
type=int,
default=200,
help='Max number of iterations')
ap.add_argument('--seed',
metavar='SEED',
type=int,
default=876543210,
help='random seed for the experiment')
args = ap.parse_args()
rand = np.random.mtrand.RandomState(args.seed)
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
print args
print
def print_features(coef, names):
""" Print sorted list of non-zero features/weights. """
print "\n".join('%s/%.2f' % (names[j], coef[j]) for j in np.argsort(coef)[::-1] if coef[j] != 0)
def sentences_average(pool, vct):
## COMPUTE: AVERAGE SENTENCES IN DOCUMENTS
tk = vct.build_tokenizer()
allwords = 0.
sum_sent = 0.
average_words = 0
min_sent = 10000
max_sent = 0
for docid, label in zip(pool.remaining, pool.target):
doc = pool.text[docid].replace("<br>", ". ")
doc = doc.replace("<br />", ". ")
isent = sent_detector.tokenize(doc)
sum_sent += len(isent)
min_sent = min(min_sent, len(isent))
max_sent = max(max_sent, len(isent))
for s in sent_detector.tokenize(doc):
average_words += len(tk(s))
allwords += 1
print("Average sentences fragments %s" % (sum_sent / len(pool.target)))
print("Min sentences fragments %s" % min_sent)
print("Max sentences fragments %s" % max_sent)
print("Total sentences fragments %s" % sum_sent)
print("Average size of sentence %s" % (average_words / allwords))
####################### MAIN ####################
def clean_html(data):
sent_train = []
print ("Cleaning text ... ")
for text in data:
doc = text.replace("<br>", ". ")
doc = doc.replace("<br />", ". ")
doc = re.sub(r"\.", ". ", doc)
# doc = re.sub(r"x*\.x*", ". ", doc)
sent_train.extend([doc])
return sent_train
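# Example: clean_html(["good plot<br>great cast"]) turns each <br> into ". "
# and pads every period with a trailing space, so the Punkt sentence tokenizer
# sees clear sentence boundaries.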
def split_data_sentences(data, sent_detector, vct=CountVectorizer()):
sent_train = []
labels = []
tokenizer = vct.build_tokenizer()
print ("Spliting into sentences...")
## Convert the documents into sentences: train
for t, sentences in zip(data.target, sent_detector.batch_tokenize(data.data)):
sents = [s for s in sentences if len(tokenizer(s)) > 1]
sent_train.extend(sents) # add the sentences separately as individual documents
labels.extend([t] * len(sents)) # Give the label of the document to all its sentences
return labels, sent_train
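# Illustrative example: for a Bunch whose .data is
#   ["Good plot. Great cast.", "Boring movie."]   with .target [1, 0]
# split_data_sentences returns
#   labels     -> [1, 1, 0]
#   sent_train -> ["Good plot.", "Great cast.", "Boring movie."]
# (one copy of the document label per surviving sentence).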
def main():
accuracies = defaultdict(lambda: [])
aucs = defaultdict(lambda: [])
x_axis = defaultdict(lambda: [])
vct = CountVectorizer(encoding='ISO-8859-1', min_df=1, max_df=1.0, binary=True, ngram_range=(1, 1),
token_pattern='\\b\\w+\\b')#, tokenizer=StemTokenizer())
vct = TfidfVectorizer(encoding='ISO-8859-1', min_df=1, max_df=1.0, binary=False, ngram_range=(1, 1),
token_pattern='\\b\\w+\\b')#, tokenizer=StemTokenizer())
vct_analizer = vct.build_tokenizer()
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
min_size = 10 # max(10, args.fixk)
# if args.fixk < 0:
args.fixk = None
data, vct = load_from_file(args.train, [categories[3]], args.fixk, min_size, vct, raw=True)
print("Data %s" % args.train)
print("Data size %s" % len(data.train.data))
parameters = parse_parameters_mat(args.cost_model)
print "Cost Parameters %s" % parameters
cost_model = set_cost_model(args.cost_function, parameters=parameters)
print "\nCost Model: %s" % cost_model.__class__.__name__
### SENTENCE TRANSFORMATION
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
## replace <br> with "." so it is recognized as the end of a sentence
data.train.data = clean_html(data.train.data)
data.test.data = clean_html(data.test.data)
labels, sent_train = split_data_sentences(data.train, sent_detector)
data.train.data = sent_train
data.train.target = np.array(labels)
labels, sent_train = split_data_sentences(data.test, sent_detector)
data.test.data = sent_train
data.test.target = np.array(labels)
print("Train:{}, Test:{}, {}".format(len(data.train.data), len(data.test.data), data.test.target.shape[0]))
## Get the features of the sentence dataset
data.train.bow = vct.fit_transform(data.train.data)
data.test.bow = vct.transform(data.test.data)
#### EXPERT CLASSIFIER
exp_clf = linear_model.LogisticRegression(penalty='l1', C=args.expert_penalty)
exp_clf.fit(data.test.bow, data.test.target)
expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
print "\nExpert: %s " % expert
#### STUDENT CLASSIFIER
clf = linear_model.LogisticRegression(penalty="l1", C=1)
# clf = set_classifier(args.classifier)
student = structured.AALStructured(model=clf, accuracy_model=None, budget=args.budget, seed=args.seed, vcn=vct,
subpool=250, cost_model=cost_model)
student.set_score_model(exp_clf)
print "\nStudent Classifier: %s" % clf
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
print ("Sentence Classification")
t0 = time.time()
# predition = exp_clf.predict(data.train.bow)
predictions = exp_clf.predict_proba(data.train.bow)
unc = np.min(predictions, axis=1)
coef = exp_clf.coef_[0]
dc = diags(coef, 0)
# ind = np.argsort(coef)
fn = np.array(vct.get_feature_names())
# print fn[ind[:10]]
# print fn[ind[-10:]]
print "Features:%s " % len(fn)
auc = metrics.roc_auc_score(data.train.target, predictions[:, 1])
pred_y = exp_clf.classes_[np.argmax(predictions, axis=1)]
accu = metrics.accuracy_score(data.train.target, pred_y)
most_post = np.argsort(predictions[:, 0])
print()
print "\n".join('%s/%.2f' % (fn[j], coef[j]) for j in np.argsort(coef)[::-1] if coef[j] != 0)
print"*"*80
print("AUC:{}, Accu:{}".format(auc, accu))
print ("Size of predictions {} - {}".format(most_post.shape[0], predictions.shape[0]))
# print "*"*50
# print "Positive"
# for d in most_post[:10]:
# print d,
# print predictions[d],
# print data.train.target[d],
# print data.train.data[d]
#
# mm = data.train.bow[d] * dc # sentences feature vectors \times diagonal of coeficients. sentences by features
# print "\n".join("%.3f / %s" % (v, n) for v, f, n in zip(mm.A[0], data.train.bow[d].A[0,:], fn) if f > 0)
# print "-"*20
#
# print "*"*50
# print "Negative"
# for d in most_post[-10:]:
# print d,
# print predictions[d],
# print data.train.target[d],
# print data.train.data[d]
# mm = data.train.bow[d] * dc # sentences feature vectors \times diagonal of coeficients. sentences by features
# # print mm[mm > 0]
#
# print "\n".join("%.3f / %s" % (v, n) for v, f, n in zip(mm.A[0], data.train.bow[d].A[0,:], fn) if f > 0)
# print "-"*20
#
# print "*"*50
# print "Middle"
# m = len(most_post) / 2
# for d in most_post[m-50:m+50]:
# print d,
# print predictions[d],
# print data.train.target[d],
# print data.train.data[d]
# mm = data.train.bow[d] * dc # sentences feature vectors \times diagonal of coeficients. sentences by features
# # print mm[mm > 0]
#
# print "\n".join("%.3f / %s" % (v, n) for v, f, n in zip(mm.A[0], data.train.bow[d].A[0,:], fn) if f > 0)
# print "-"*20
print("Elapsed time %.3f" % (time.time() - t0))
def neutral_label(label):
if label is None:
return 0
else:
return 1
def format_query(query_labels):
string = ""
for l, q in query_labels:
string = string + "{0}".format(l)
for qi in q:
string = string + "\t{0:.2f} ".format(qi)
string = string + "\n"
return string
def main2():
# load paramters
# load data
# preprocess data
# set student
# set expert
# start loop
pass
if __name__ == '__main__':
main()
|
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system::
|==================== In-process ====================|== Out-of-process ==|
+----------+     +-------+       +--------+     +----------+    +---------+
|          |  => | Work  |    => |        |  => | Call Q   | => |         |
|          |     | Ids   |       |        |     |          |    |         |
|          |     +-------+       |        |     +----------+    |         |
|          |     | ...   |       |        |     |...       |    |         |
|          |     | 6     |       |        |     |5, call() |    |         |
|          |     | 7     |       |        |     |...       |    |         |
| Process  |     | ...   |       | Local  |     +----------+    | Process |
|  Pool    |     +-------+       | Worker |                     |  #1..n  |
| Executor |                     | Thread |                     |         |
|          |     +---------+     |        |     +----------+    |         |
|          | <=> | Work    | <=> |        |  <= | Result Q | <= |         |
|          |     | Items   |     |        |     |          |    |         |
|          |     +---------+     |        |     +----------+    |         |
|          |     |6: call()|     |        |     |...       |    |         |
|          |     |   future|     |        |     |4, result |    |         |
|          |     |...      |     |        |     |3, except |    |         |
+----------+     +---------+     +--------+     +----------+    +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the 'Work Items'
dict
- adds the id of the _WorkItem to the 'Work Ids' queue
Local worker thread:
- reads work ids from the 'Work Ids' queue and looks up the corresponding
WorkItem from the 'Work Items' dict: if the work item has been cancelled
then it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the 'Call Q'. New _CallItems are put in the 'Call Q'
until 'Call Q' is full. NOTE: the size of the 'Call Q' is kept small
because calls placed in the 'Call Q' can no longer be cancelled with
Future.cancel().
- reads _ResultItems from 'Result Q', updates the future stored in the
'Work Items' dict and deletes the dict entry
Process #1..n:
- reads _CallItems from 'Call Q', executes the calls, and puts the
resulting _ResultItems in 'Result Q'
"""
import atexit
import multiprocessing
import threading
import weakref
import sys
from . import _base
from ....extern.six.moves import range
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow
# the interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_thread_references = set()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
for thread_reference in _thread_references:
thread = thread_reference()
if thread is not None:
thread.join()
def _remove_dead_thread_references():
"""Remove inactive threads from _thread_references.
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
... t = ThreadPoolExecutor(max_workers=5)
... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
if thread_reference() is None:
_thread_references.discard(thread_reference)
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue, shutdown):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Parameters
----------
call_queue
A `multiprocessing.Queue` of _CallItems that will be read and
evaluated by the worker.
result_queue
A `multiprocessing.Queue` of _ResultItems that will be written
to by the worker.
shutdown
A `multiprocessing.Event` that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
try:
call_item = call_queue.get(block=True, timeout=0.1)
except queue.Empty:
if shutdown.is_set():
return
else:
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Parameters
----------
pending_work_items
A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids
A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue
A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_manangement_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue,
shutdown_process_event):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Parameters
----------
executor_reference
A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
processes
A list of the multiprocessing.Process instances used as
workers.
pending_work_items
A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue
A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue
A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue
A multiprocessing.Queue of _ResultItems generated by the
process workers.
shutdown_process_event
A multiprocessing.Event used to signal the
process workers that they should exit when their work queue is
empty.
"""
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
try:
result_item = result_queue.get(block=True, timeout=0.1)
except queue.Empty:
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_process_event.set()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
return
del executor
else:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Parameters
----------
max_workers
The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_remove_dead_thread_references()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_process_event = multiprocessing.Event()
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_manangement_worker,
args=(weakref.ref(self),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue,
self._shutdown_process_event))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_thread_references.add(weakref.ref(self._queue_management_thread))
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(target=_process_worker,
args=(self._call_queue,
self._result_queue,
self._shutdown_process_event))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError(
'cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if wait:
if self._queue_management_thread:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._shutdown_process_event = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
|
import unittest
import opentuner
import mock
import random
import numpy
from opentuner.search import manipulator
def faked_random(nums):
f = fake_random(nums)
def inner(*args, **kwargs):
return f.next()
return inner
def fake_random(nums):
i = 0
while True:
yield nums[i]
i = (i+1) % len(nums)
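# Illustrative behaviour: faked_random([1, 6]) returns a callable that ignores
# its arguments and yields 1, 6, 1, 6, ... -- used as a mock side_effect so
# random.randint produces a deterministic sequence in the tests below.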
class PermutationOperatorTests(unittest.TestCase):
def setUp(self):
"""
Set up a few configurations. The values of the PermutationParameter are:
config1 - 0 1 2 3 4 5 6 7 8 9
config2 - 4 3 2 1 0 9 8 7 6 5
config3 - 1 0 4 2 7 9 5 3 6 8
"""
self.manipulator = manipulator.ConfigurationManipulator()
self.param1 = manipulator.PermutationParameter("param1", [0,1,2,3,4,5,6,7,8,9])
self.manipulator.add_parameter(self.param1)
self.cfg = self.manipulator.seed_config()
self.config1 = self.manipulator.seed_config()
self.config2 = self.manipulator.seed_config()
self.config3 = self.manipulator.seed_config()
# repeating values
self.config4 = self.manipulator.seed_config()
self.config5 = self.manipulator.seed_config()
self.param1.set_value(self.config1, [0,1,2,3,4,5,6,7,8,9])
self.param1.set_value(self.config2, [4,3,2,1,0,9,8,7,6,5])
self.param1.set_value(self.config3, [1,0,4,2,7,9,5,3,6,8])
# repeating values
self.param1.set_value(self.config4, [1,2,3,4,2,3,4,3,4,4])
self.param1.set_value(self.config5, [4,2,4,3,3,1,3,4,2,4])
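# Background sketch of the operators exercised below (general descriptions,
# with the exact behaviour pinned by the mocked cut points in each test):
# PMX (partially matched crossover) copies a slice from one parent and repairs
# clashes through the slice's value mapping; CX (cycle crossover) swaps values
# along the cycles induced by the two parents; OX1/OX3 (order crossovers)
# preserve relative order outside the copied slice.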
@mock.patch('random.randint', side_effect=faked_random([1,6]))
def test_op2_random_swap_1_6(self, randint_func):
# operator should swap the indices at 1 and 6
self.param1.op2_random_swap(self.cfg, self.config1)
self.assertEqual(self.param1.get_value(self.cfg),[0,6,2,3,4,5,1,7,8,9])
self.assertEqual(self.param1.get_value(self.config1),[0,1,2,3,4,5,6,7,8,9])
@mock.patch('random.randint', side_effect=faked_random([7,2]))
def test_op2_random_invert(self, randint_func):
#should reverse a section of length 3 starting at index given by randint
self.param1.op2_random_invert(self.cfg, self.config1)
self.assertEqual(self.param1.get_value(self.cfg),[0,1,2,3,4,5,6,9,8,7])
self.param1.op2_random_invert(self.cfg, self.config1)
self.assertEqual(self.param1.get_value(self.cfg),[0,1,4,3,2,5,6,7,8,9])
@mock.patch('random.randint', side_effect=faked_random([0]))
def test_op3_cross_PMX_str5(self, randint_func):
# should perform PMX with a cut at 0 and crossover size 5
self.param1.op3_cross(self.cfg, self.config1, self.config3,
xchoice='op3_cross_PMX', strength=0.5)
self.assertEqual(self.param1.get_value(self.cfg),[1,0,4,2,7,5,6,3,8,9])
@mock.patch('random.randint', side_effect=faked_random([5]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_CX_no_cross(self, uniform_func, randint_func):
# should perform no cross
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.8)
self.assertEqual(self.param1.get_value(self.config1),[0,1,2,3,4,5,6,7,8,9])
@mock.patch('random.randint', side_effect=faked_random([5]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_CX_cross_p1(self, uniform_func, randint_func):
# should cross the first parent
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.3, c1=0.5, c2="unused")
self.assertEqual(self.param1.get_value(self.config1),[0,1,2,3,4,9,6,7,8,5])
@mock.patch('random.randint', side_effect=faked_random([5]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_CX_cross_p2(self, uniform_func, randint_func):
# should cross the second parent
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.3, c1=0.3, c2="unused")
self.assertEqual(self.param1.get_value(self.config1),[0,1,2,3,4,9,5,7,6,8])
@mock.patch('random.randint', side_effect=faked_random([5]))
def test_op3_cross_PX_5(self, randint_func):
# Random cut point = 5 (index = 4)
self.param1.op3_cross_PX(self.cfg, self.config1, self.config3, 2)
self.assertEqual(self.param1.get_value(self.cfg),[1,0,4,2,3,5,6,7,8,9])
@mock.patch('random.randint', side_effect=faked_random([2]))
def test_op3_cross_PMX_0_d4(self, randint_func):
# cut = 2, d = 4
self.param1.op3_cross_PMX(self.cfg, self.config2, self.config3, 4)
self.assertEqual(self.param1.get_value(self.cfg),[1,3,4,2,7,9,8,0,6,5])
@mock.patch('random.randint', side_effect=faked_random([0]))
def test_op3_cross_PMX_0_d5(self, randint_func):
# cut = 0, d = 5
self.param1.op3_cross_PMX(self.cfg, self.config1, self.config3, 5)
self.assertEqual(self.param1.get_value(self.cfg),[1,0,4,2,7,5,6,3,8,9])
@mock.patch('random.randint', side_effect=faked_random([4]))
def test_op3_cross_PMX_dups(self, randint_func):
# cut = 4, d = 5
self.param1.op3_cross_PMX(self.cfg, self.config5, self.config4, 5)
# [4,2,4,3,3,1,3,4,2,4]
# [1,2,3,4,2,3,4,3,4,4]
# expected:
# [1,2,4,3,2,3,4,3,4,4]
self.assertEqual(self.param1.get_value(self.cfg), [1,2,4,3,2,3,4,3,4,4])
@mock.patch('random.randint', side_effect=faked_random([5]))
def test_op3_cross_CX_5(self, randint_func):
# initial replacement at index 5
self.param1.op3_cross_CX(self.cfg, self.config1, self.config2, "unused")
self.assertEqual(self.param1.get_value(self.cfg),[0,1,2,3,4,9,6,7,8,5])
self.param1.op3_cross_CX(self.cfg, self.config1, self.config3, "unused")
self.assertEqual(self.param1.get_value(self.cfg),[0,1,2,3,4,9,5,7,6,8])
@mock.patch('random.randint', side_effect=faked_random([0]))
def test_op3_cross_CX_dups(self, randint_func):
# initial replacement at index 4
self.param1.op3_cross_CX(self.cfg, self.config5, self.config4, "unused")
# [4,2,4,3,3,1,3,4,2,4]
# [1,2,3,4,2,3,4,3,4,4]
# expected:
# [1,2,3,4,3,3,4,4,2,4]
self.assertEqual(self.param1.get_value(self.cfg), [1,2,3,4,3,3,4,4,2,4])
@mock.patch('random.randint', side_effect=faked_random([3]))
def test_op3_cross_OX1_3_d4(self, randint_func):
# cut at 3
# d = 4
self.param1.op3_cross_OX1(self.cfg, self.config1, self.config2, 4)
self.assertEqual(self.param1.get_value(self.cfg),[2,3,4,1,0,9,8,5,6,7])
self.param1.op3_cross_OX1(self.cfg, self.config1, self.config3, 4)
self.assertEqual(self.param1.get_value(self.cfg),[0,1,3,2,7,9,5,4,6,8])
@mock.patch('random.randint', side_effect=faked_random([4,2]))
def test_op3_cross_OX3_2_5_d4(self, randint_func):
# cuts at 4,2
# d = 4
self.param1.op3_cross_OX3(self.cfg, self.config1, self.config2, 4)
self.assertEqual(self.param1.get_value(self.cfg),[3,4,5,6,2,1,0,9,7,8])
self.param1.op3_cross_OX3(self.cfg, self.config1, self.config3, 4)
self.assertEqual(self.param1.get_value(self.cfg),[0,1,3,5,4,2,7,9,6,8])
class FloatArrayOperatorTests(unittest.TestCase):
"""
also tests the operators for Array (since Array is abstract)
"""
def setUp(self):
"""
Set up a few configurations. The values of the FloatArray are:
config1 - 1.0 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9
config2 - 2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9
config3 - 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.8
"""
self.manipulator = manipulator.ConfigurationManipulator()
self.param1 = manipulator.FloatArray("param1", 10, 4, 0)
self.manipulator.add_parameter(self.param1)
self.cfg = self.manipulator.seed_config()
self.config1 = self.manipulator.seed_config()
self.config2 = self.manipulator.seed_config()
self.config3 = self.manipulator.seed_config()
self.param1.set_value(self.config1, numpy.array([1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9]))
self.param1.set_value(self.config2, numpy.array([2.0,2.1,2.2,2.3,2.4,2.5,2.6,2.7,2.8,2.9]))
self.param1.set_value(self.config3, numpy.array([3.0,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.8]))
@mock.patch('random.randint', side_effect=faked_random([3]))
def test_op3_cross_3_str4(self, randint_func):
self.param1.op3_cross(self.cfg, self.config1, self.config2, strength=0.4)
val = self.param1.get_value(self.cfg)
expected = [1.0,1.1,1.2,2.3,2.4,2.5,2.6,1.7,1.8,1.9]
for i in range(len(val)):
self.assertAlmostEqual(val[i], expected[i])
@mock.patch('random.randint', side_effect=faked_random([3]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_no_cross(self, uniform_func, randint_func):
#should perform no cross
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.8)
val = self.param1.get_value(self.config1)
expected = [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9]
for i in range(len(val)):
self.assertAlmostEqual(val[i], expected[i])
@mock.patch('random.randint', side_effect=faked_random([3]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_cross_p1(self, uniform_func, randint_func):
#should cross the first parent
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.3, c1=0.5, c2="unused")
val = self.param1.get_value(self.config1)
expected = [1.0,1.1,1.2,2.3,2.4,2.5,1.6,1.7,1.8,1.9]
for i in range(len(val)):
self.assertAlmostEqual(val[i], expected[i])
@mock.patch('random.randint', side_effect=faked_random([3]))
@mock.patch('random.uniform', side_effect=faked_random([0.4]))
def test_op3_swarm_cross_p2(self, uniform_func, randint_func):
#should cross the second parent
self.param1.op3_swarm(self.config1, self.config2, self.config3,
xchoice='op3_cross_CX', c=0.3, c1=0.3, c2="unused")
val = self.param1.get_value(self.config1)
expected = [1.0,1.1,1.2,3.3,3.4,3.5,1.6,1.7,1.8,1.9]
self.assertEqual(len(val),len(expected))
for i in range(len(val)):
self.assertAlmostEqual(val[i], expected[i])
@mock.patch('random.random', side_effect=faked_random([0.2, 0.4]))
def test_op3_swarm_parallel(self, random_func):
# r1 = 0.2, r2 = 0.4, velocities = [-2,0,0,0,0,0,1,1.5,2,3]
# max and min are 4, 0
velocities = numpy.array([-2.0,0.0,0,0,0,0,1.0,1.5,2,3.0])
vs = self.param1.op3_swarm_parallel(self.config1, self.config2, self.config3, velocities=velocities)
vs_expected = [-1.5,.5,.5,.5,.5,.5,1.5,2.0,2.5,3.48]
self.assertEqual(len(vs),len(vs_expected))
for i in range(len(vs)):
self.assertAlmostEqual(vs[i], vs_expected[i])
val = self.param1.get_value(self.config1)
expected = [0,1.6,1.7,1.8,1.9,2.0,3.1,3.7,4,4]
self.assertEqual(len(val),len(expected))
for i in range(len(val)):
self.assertAlmostEqual(val[i], expected[i])
|
|
#-*- coding: utf-8 -*-
import os
from django.test import TestCase
from django.core.urlresolvers import reverse
import django.core.files
from django.contrib.admin import helpers
from django.contrib.auth.models import User
from django.conf import settings
from filer.models.filemodels import File
from filer.models.foldermodels import Folder, FolderPermission
from filer.models.imagemodels import Image
from filer.models.clipboardmodels import Clipboard
from filer.models.virtualitems import FolderRoot
from filer.models import tools
from filer.tests.helpers import (create_superuser, create_folder_structure,
create_image, SettingsOverride)
from filer import settings as filer_settings
class FilerFolderAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
def test_filer_app_index_get(self):
response = self.client.get(reverse('admin:app_list', args=('filer',)))
self.assertEqual(response.status_code, 200)
def test_filer_make_root_folder_get(self):
response = self.client.get(reverse('admin:filer-directory_listing-make_root_folder')+"?_popup=1")
self.assertEqual(response.status_code, 200)
def test_filer_make_root_folder_post(self):
FOLDER_NAME = "root folder 1"
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'),
{
"name":FOLDER_NAME,
})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, FOLDER_NAME)
#TODO: not sure why the status code is 200
self.assertEqual(response.status_code, 200)
def test_filer_remember_last_opened_directory(self):
folder = Folder.objects.create(name='remember me please')
get_last_folder = lambda: self.client.get(reverse('admin:filer-directory_listing-last'), follow=True)
self.client.get(reverse('admin:filer-directory_listing', kwargs={'folder_id': folder.id}))
self.assertEqual(int(self.client.session['filer_last_folder_id']), folder.id)
self.assertEqual(get_last_folder().context['folder'], folder)
# let's test fallback
folder.delete()
self.assertTrue(isinstance(get_last_folder().context['folder'], FolderRoot))
def test_filer_directory_listing_root_empty_get(self):
response = self.client.post(reverse('admin:filer-directory_listing-root'))
self.assertEqual(response.status_code, 200)
def test_filer_directory_listing_root_get(self):
create_folder_structure(depth=3, sibling=2, parent=None)
response = self.client.post(reverse('admin:filer-directory_listing-root'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['folder'].children.count(), 6)
def test_validate_no_duplcate_folders(self):
FOLDER_NAME = "root folder 1"
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name":FOLDER_NAME,
"_popup": 1
})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, FOLDER_NAME)
# and create another one
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'),
{"name":FOLDER_NAME, "_popup": 1})
# second folder didn't get created
self.assertEqual(Folder.objects.count(), 1)
self.assertContains(response, 'Folder with this name already exists')
def test_validate_no_duplcate_folders_on_rename(self):
self.assertEqual(Folder.objects.count(), 0)
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name": "foo",
"_popup": 1})
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.all()[0].name, "foo")
# and create another one
response = self.client.post(reverse('admin:filer-directory_listing-make_root_folder'), {
"name": "bar",
"_popup": 1})
self.assertEqual(Folder.objects.count(), 2)
bar = Folder.objects.get(name="bar")
response = self.client.post("/admin/filer/folder/%d/" % bar.pk, {
"name": "foo",
"_popup": 1})
self.assertContains(response, 'Folder with this name already exists')
# refresh from db and validate that its name didn't change
bar = Folder.objects.get(pk=bar.pk)
self.assertEqual(bar.name, "bar")
def test_change_folder_owner_keep_name(self):
folder = Folder.objects.create(name='foobar')
another_superuser = User.objects.create_superuser(
'gigi', 'admin@ignore.com', 'secret')
response = self.client.post('/admin/filer/folder/%d/' % folder.pk, {
'owner': another_superuser.pk,
'name': 'foobar',
'_continue': 'Save and continue editing'})
# successful POST returns a redirect
self.assertEqual(response.status_code, 302)
folder = Folder.objects.get(pk=folder.pk)
self.assertEqual(folder.owner.pk, another_superuser.pk)
class FilerImageAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
class FilerClipboardAdminUrlsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.img = create_image()
self.image_name = 'test_file.jpg'
self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, self.image_name)
self.img.save(self.filename, 'JPEG')
def tearDown(self):
self.client.logout()
os.remove(self.filename)
for img in Image.objects.all():
img.delete()
def test_filer_upload_file(self, extra_headers={}):
self.assertEqual(Image.objects.count(), 0)
file_obj = django.core.files.File(open(self.filename, 'rb'))
response = self.client.post(
reverse('admin:filer-ajax_upload'),
{'Filename': self.image_name, 'Filedata': file_obj, 'jsessionid': self.client.session.session_key,},
**extra_headers
)
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(Image.objects.all()[0].original_filename, self.image_name)
def test_filer_ajax_upload_file(self):
self.assertEqual(Image.objects.count(), 0)
file_obj = django.core.files.File(open(self.filename, 'rb'))
response = self.client.post(
reverse('admin:filer-ajax_upload')+'?filename=%s' % self.image_name,
data=file_obj.read(),
content_type='application/octet-stream',
**{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
)
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(Image.objects.all()[0].original_filename, self.image_name)
class BulkOperationsMixin(object):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.img = create_image()
self.image_name = 'test_file.jpg'
self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR,
self.image_name)
self.img.save(self.filename, 'JPEG')
self.create_src_and_dst_folders()
self.folder = Folder.objects.create(name="root folder", parent=None)
self.sub_folder1 = Folder.objects.create(name="sub folder 1", parent=self.folder)
self.sub_folder2 = Folder.objects.create(name="sub folder 2", parent=self.folder)
self.image_obj = self.create_image(self.src_folder)
self.create_file(self.folder)
self.create_file(self.folder)
self.create_image(self.folder)
self.create_image(self.sub_folder1)
self.create_file(self.sub_folder1)
self.create_file(self.sub_folder1)
self.create_image(self.sub_folder2)
self.create_image(self.sub_folder2)
def tearDown(self):
self.client.logout()
os.remove(self.filename)
for f in File.objects.all():
f.delete()
for folder in Folder.objects.all():
folder.delete()
def create_src_and_dst_folders(self):
self.src_folder = Folder(name="Src", parent=None)
self.src_folder.save()
self.dst_folder = Folder(name="Dst", parent=None)
self.dst_folder.save()
def create_image(self, folder, filename=None):
filename = filename or 'test_image.jpg'
file_obj = django.core.files.File(open(self.filename, 'rb'), name=filename)
image_obj = Image.objects.create(owner=self.superuser, original_filename=self.image_name, file=file_obj, folder=folder)
image_obj.save()
return image_obj
def create_file(self, folder, filename=None):
filename = filename or 'test_file.dat'
file_data = django.core.files.base.ContentFile('some data')
file_data.name = filename
file_obj = File.objects.create(owner=self.superuser, original_filename=filename, file=file_data, folder=folder)
file_obj.save()
return file_obj
class FilerBulkOperationsTests(BulkOperationsMixin, TestCase):
def test_move_files_and_folders_action(self):
# TODO: Test recursive (files and folders tree) move
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': self.dst_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 0)
self.assertEqual(self.dst_folder.files.count(), 1)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.dst_folder.id,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': self.src_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
def test_validate_no_duplicate_folders_on_move(self):
"""Create the following folder hierarchy:
root
|
|--foo
| |-bar
|
|--bar
and try to move the outer bar into foo. This has to fail since it would result
in two folders with the same name and parent.
"""
root = Folder.objects.create(name='root', owner=self.superuser)
foo = Folder.objects.create(name='foo', parent=root, owner=self.superuser)
bar = Folder.objects.create(name='bar', parent=root, owner=self.superuser)
foos_bar = Folder.objects.create(name='bar', parent=foo, owner=self.superuser)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': root.pk,
})
response = self.client.post(url, {
'action': 'move_files_and_folders',
'post': 'yes',
'destination': foo.pk,
helpers.ACTION_CHECKBOX_NAME: 'folder-%d' % (bar.pk,),
})
# refresh from db and validate that it hasn't been moved
bar = Folder.objects.get(pk=bar.pk)
self.assertEqual(bar.parent.pk, root.pk)
def test_move_to_clipboard_action(self):
# TODO: Test recursive (files and folders tree) move
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'move_to_clipboard',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 0)
self.assertEqual(self.dst_folder.files.count(), 0)
clipboard = Clipboard.objects.get(user=self.superuser)
self.assertEqual(clipboard.files.count(), 1)
tools.move_files_from_clipboard_to_folder(clipboard, self.src_folder)
tools.discard_clipboard(clipboard)
self.assertEqual(clipboard.files.count(), 0)
self.assertEqual(self.src_folder.files.count(), 1)
def test_files_set_public_action(self):
self.image_obj.is_public = False
self.image_obj.save()
self.assertEqual(self.image_obj.is_public, False)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'files_set_public',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.is_public, True)
def test_files_set_private_action(self):
self.image_obj.is_public = True
self.image_obj.save()
self.assertEqual(self.image_obj.is_public, True)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'files_set_private',
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.is_public, False)
self.image_obj.is_public = True
self.image_obj.save()
def test_copy_files_and_folders_action(self):
# TODO: Test recursive (files and folders tree) copy
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 0)
self.assertEqual(self.image_obj.original_filename, 'test_file.jpg')
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'copy_files_and_folders',
'post': 'yes',
'suffix': 'test',
'destination': self.dst_folder.id,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.assertEqual(self.src_folder.files.count(), 1)
self.assertEqual(self.dst_folder.files.count(), 1)
self.assertEqual(self.src_folder.files[0].id, self.image_obj.id)
dst_image_obj = self.dst_folder.files[0]
self.assertEqual(dst_image_obj.original_filename, 'test_filetest.jpg')
class FilerDeleteOperationTests(BulkOperationsMixin, TestCase):
def test_delete_files_or_folders_action(self):
self.assertNotEqual(File.objects.count(), 0)
self.assertNotEqual(Image.objects.count(), 0)
self.assertNotEqual(Folder.objects.count(), 0)
url = reverse('admin:filer-directory_listing-root')
folders = []
for folder in FolderRoot().children.all():
folders.append('folder-%d' % (folder.id,))
response = self.client.post(url, {
'action': 'delete_files_or_folders',
'post': 'yes',
helpers.ACTION_CHECKBOX_NAME: folders,
})
self.assertEqual(File.objects.count(), 0)
self.assertEqual(Folder.objects.count(), 0)
def test_delete_files_or_folders_action_with_mixed_types(self):
# add more files/images so we can test the polymorphic queryset with multiple types
self.create_file(folder=self.src_folder)
self.create_image(folder=self.src_folder)
self.create_file(folder=self.src_folder)
self.assertNotEqual(File.objects.count(), 0)
self.assertNotEqual(Image.objects.count(), 0)
url = reverse('admin:filer-directory_listing', args=(self.folder.id,))
folders = []
for f in File.objects.filter(folder=self.folder):
folders.append('file-%d' % (f.id,))
folders.append('folder-%d' % self.sub_folder1.id)
response = self.client.post(url, {
'action': 'delete_files_or_folders',
'post': 'yes',
helpers.ACTION_CHECKBOX_NAME: folders,
})
self.assertEqual(File.objects.filter(folder__in=[self.folder.id, self.sub_folder1.id]).count(), 0)
class FilerResizeOperationTests(BulkOperationsMixin, TestCase):
def test_resize_images_action(self):
# TODO: Test recursive (files and folders tree) processing
self.assertEqual(self.image_obj.width, 800)
self.assertEqual(self.image_obj.height, 600)
url = reverse('admin:filer-directory_listing', kwargs={
'folder_id': self.src_folder.id,
})
response = self.client.post(url, {
'action': 'resize_images',
'post': 'yes',
'width': 42,
'height': 42,
'crop': True,
'upscale': False,
helpers.ACTION_CHECKBOX_NAME: 'file-%d' % (self.image_obj.id,),
})
self.image_obj = Image.objects.get(id=self.image_obj.id)
self.assertEqual(self.image_obj.width, 42)
self.assertEqual(self.image_obj.height, 42)
class PermissionAdminTest(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.client.logout()
def test_render_add_view(self):
"""
Really stupid and simple test to see if the add Permission view can be rendered
"""
response = self.client.get(reverse('admin:filer_folderpermission_add'))
self.assertEqual(response.status_code, 200)
class FolderListingTest(TestCase):
def setUp(self):
superuser = create_superuser()
self.staff_user = User.objects.create_user(
username='joe', password='x', email='joe@mata.com')
self.staff_user.is_staff = True
self.staff_user.save()
self.parent = Folder.objects.create(name='bar', parent=None, owner=superuser)
self.foo_folder = Folder.objects.create(name='foo', parent=self.parent, owner=self.staff_user)
self.bar_folder = Folder.objects.create(name='bar', parent=self.parent, owner=superuser)
self.baz_folder = Folder.objects.create(name='baz', parent=self.parent, owner=superuser)
file_data = django.core.files.base.ContentFile('some data')
file_data.name = 'spam'
self.spam_file = File.objects.create(
owner=superuser, original_filename='spam',
file=file_data, folder=self.parent)
self.client.login(username='joe', password='x')
def test_with_permissions_disabled(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=False):
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees all items: FOO, BAR, BAZ, SPAM
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk, self.baz_folder.pk,
self.spam_file.pk]))
def test_folder_ownership(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees only 1 folder: FOO
# he doesn't see BAR, BAZ and SPAM because he doesn't own them
# and no permission has been given
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk]))
def test_with_permission_given_to_folder(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
# give permissions over BAR
FolderPermission.objects.create(
folder=self.bar_folder,
user=self.staff_user,
type=FolderPermission.THIS,
can_edit=FolderPermission.ALLOW,
can_read=FolderPermission.ALLOW,
can_add_children=FolderPermission.ALLOW)
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees 2 folders: FOO, BAR
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk]))
def test_with_permission_given_to_parent_folder(self):
with SettingsOverride(filer_settings, FILER_ENABLE_PERMISSIONS=True):
FolderPermission.objects.create(
folder=self.parent,
user=self.staff_user,
type=FolderPermission.CHILDREN,
can_edit=FolderPermission.ALLOW,
can_read=FolderPermission.ALLOW,
can_add_children=FolderPermission.ALLOW)
response = self.client.get(
reverse('admin:filer-directory_listing',
kwargs={'folder_id': self.parent.id}))
item_list = response.context['paginated_items'].object_list
# user sees all items because he has permissions on the parent folder
self.assertEquals(
set(folder.pk for folder, folder_perms in item_list),
set([self.foo_folder.pk, self.bar_folder.pk, self.baz_folder.pk,
self.spam_file.pk]))
|
|
import pandas as pd
import geohash
import time
import itertools
import numpy as np
import random
'''
This module is meant to be used in conjunction with pipekml, though it could have other uses.
Module: pipegeohash.py
Purpose: A simple tool for geohashing an entire table at once, allowing you to update both corresponding tables simultaneously
Functions to be used:
1) map_table(csvfile,precision) - given a csv file name and a precision from 1 to 8, returns the corresponding geohash table
2) df2list(df) - takes a dataframe to a list
3) list2df(list) - takes a list and turns it into a DataFrame
Created by: Bennett Murphy
email: murphy214@marshall.edu
'''
#takes a dataframe and turns it into a list
def df2list(df):
df = [df.columns.values.tolist()]+df.values.tolist()
return df
#takes a list and turns it into a dataframe
def list2df(df):
df = pd.DataFrame(df[1:], columns=df[0])
return df
# encoding the geohash string containing arguments into a function that will be mapped
def my_encode(argstring):
argstring = str.split(argstring,'_')
lat,long,precision = float(argstring[0]),float(argstring[1]),int(argstring[2])
try:
hash = geohash.encode(lat,long,precision)
except:
hash = ''
return hash
# getting lat and long headers
def get_latlong_headers(headers):
for row in headers:
if 'lat' in str(row).lower():
latheader = row
elif 'lon' in str(row).lower():
longheader = row
return [latheader,longheader]
# given a dataframe and a list of columns
# drops columns from df
def drop_columns(table,columns):
list = []
count = 0
for row in columns:
columnrow = row
for row in table.columns.values.tolist():
if columnrow == row:
list.append(count)
count += 1
table.drop(table.columns[list], axis=1, inplace=True)
return table
# function making the geohash string and returning the table with an
# appropriate geohash column
def geohash_table(data,latlongheaders,precision):
latheader,longheader = latlongheaders
data['ARGS'] = data[latheader].astype(str) + '_' + data[longheader].astype(str) + '_' + str(precision)
data['GEOHASH'] = data['ARGS'].map(my_encode)
data = drop_columns(data,['ARGS'])
return data
# function to perform outer (column) operations on fields and occasionally a function map (maybe)
def perform_outer_operation(data,field,operation):
derivfield = str.split(field,'_')[0]
print derivfield,operation
# right now I'll do the stat operations that seem relevant
if operation.lower() == 'mean':
data[field] = data[derivfield].mean()
elif operation.lower() == 'sum':
data[field] = data[derivfield].sum()
elif operation.lower() == 'std':
data[field] = data[derivfield].std()
elif operation.lower() == 'max':
data[field] = data[derivfield].max()
return data
# makes and returns the squares table.
def make_squares(data,precision,columns):
# doing the group by and sorting by the highest count value
data['COUNT'] = 1
squares = data[['GEOHASH','COUNT']]
squares = squares.groupby('GEOHASH').sum()
squares = squares.sort(['COUNT'],ascending=[0])
squares = squares.reset_index()
squares['GEOHASH'] = squares['GEOHASH'].astype(str)
squares = squares[squares.GEOHASH.str.len() > 0]
squares = squares.groupby('GEOHASH').first()
squares = squares.reset_index()
# making header
header = ['GEOHASH','LAT1', 'LONG1', 'LAT2', 'LONG2', 'LAT3', 'LONG3', 'LAT4', 'LONG4','COUNT']
newsquares = [header]
# iterating through each square here
for row in df2list(squares)[1:]:
# getting points
points = get_points_geohash(row[0])
# making new row
newrow = [row[0]] + points + row[1:]
# appending to newsquares
newsquares.append(newrow)
# taking newsquares to dataframe
squares = list2df(newsquares)
return squares
# given a geohash returns the 4 points that will make up the squares table
def get_points_geohash(hash):
#processing out the 4 points
hashreturn = geohash.decode_exactly(hash)
#getting lat and long datum
latdatum = hashreturn[0]
longdatum = hashreturn[1]
#getting delta
latdelta = hashreturn[2]
longdelta = hashreturn[3]
point1 = [latdatum-latdelta, longdatum+longdelta]
point2 = [latdatum-latdelta, longdatum-longdelta]
point3 = [latdatum+latdelta, longdatum+longdelta]
point4 = [latdatum+latdelta, longdatum-longdelta]
return point1 + point2 + point3 + point4
# given a geohash returns the 4 points that will make up the squares table
def get_alignment_geohash(hash):
#processing out the 4 points
hashreturn = geohash.decode_exactly(hash)
#getting lat and long datum
latdatum = hashreturn[0]
longdatum = hashreturn[1]
#getting delta
latdelta = hashreturn[2]
longdelta = hashreturn[3]
point1 = [latdatum-latdelta, longdatum+longdelta]
point2 = [latdatum-latdelta, longdatum-longdelta]
point3 = [latdatum+latdelta, longdatum-longdelta]
point4 = [latdatum+latdelta, longdatum+longdelta]
return [point1,point2,point3,point4,point1]
# gets all relevant headers for each value in columns
def get_column_headers(columnsandcount,headers):
columnheaders = []
for row in columnsandcount:
oldrow = str(row)
for row in headers:
if oldrow in str(row):
columnheaders.append(row)
return columnheaders
# given columnheaders from the output above,
# checks if the output of str split is above 2;
# if it is, another operation is supposed to be performed.
# in a way this is an api for other non-sum values; I guess the
# extra field will be used to determine whether an operation is outer or inner
# group by if a field is added (thinking out loud)
def get_nonsum_headers(columnheaders):
nonsumheaders = []
for row in columnheaders:
if '_' in str(row):
nonsumheaders.append(row)
return nonsumheaders
# does the inverse of the operation above
def get_sum_headers(columnheaders):
sumheaders = []
for row in columnheaders:
if not '_' in str(row):
sumheaders.append(row)
return sumheaders
# creates geohash and squares table
def map_table(data,precision,**kwargs):
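# recognized kwargs (parsed in the loop below): columns, filename,
# return_squares, map_only, geohash_field, latlongheaders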
columns = []
filename = False
return_squares = False
map_only = False
geohash_field = False
latlongheaders = False
for key,value in kwargs.iteritems():
if key == 'columns':
columns = value
if key == 'filename':
filename = value
if key == 'return_squares':
return_squares = value
if key == 'map_only':
map_only = value
if key == 'geohash_field':
geohash_field = value
if key == 'latlongheaders':
latlongheaders = value
if geohash_field == False:
# sending into new geohashing function
data = geohash_points(data,precision,latlongheaders)
else:
data['GEOHASH'] = data['GEOHASH'].str[:precision]
# returning data if only the mapped table should be returned
if map_only == True:
return data
# getting column headers
columnheaders = data.columns.values.tolist()
# making squares table
squares = make_squares(data,8,columnheaders)
if not filename == False:
squares.to_csv(filename,index=False)
else:
squares.to_csv('squares' +str(precision) + '.csv',index=False)
if return_squares == True:
return squares
else:
return data
# given a table with a high-precision geohash and a list of precisions, creates constituent
# tables and writes them out to csv files accordingly
# input the columns field for other values to be summed or grouped by
def make_geohash_tables(table,listofprecisions,**kwargs):
'''
sort_by - field to sort by for each group
return_squares - boolean arg if true returns a list of squares instead of writing out to table
'''
return_squares = False
sort_by = 'COUNT'
# logic for accepting kwarg inputs
for key,value in kwargs.iteritems():
if key == 'sort_by':
sort_by = value
if key == 'return_squares':
return_squares = value
# getting header
header = df2list(table)[0]
# getting columns
columns = header[10:]
# getting original table
originaltable = table
if not sort_by == 'COUNT':
originaltable = originaltable.sort([sort_by],ascending=[0])
listofprecisions = sorted(listofprecisions,reverse=True)
# making total table to hold a list of dfs
if return_squares == True and listofprecisions[-1] == 8:
total_list = [table]
elif return_squares == True:
total_list = []
for row in listofprecisions:
precision = int(row)
table = originaltable
table['GEOHASH'] = table.GEOHASH.str[:precision]
table = table[['GEOHASH','COUNT']+columns].groupby(['GEOHASH'],sort=True).sum()
table = table.sort([sort_by],ascending=[0])
table = table.reset_index()
newsquares = [header]
# iterating through each square here
for row in df2list(table)[1:]:
# getting points
points = get_points_geohash(row[0])
# making new row
newrow = [row[0]] + points + row[1:]
# appending to newsquares
newsquares.append(newrow)
# taking newsquares to dataframe
table = list2df(newsquares)
if return_squares == True:
total_list.append(table)
else:
table.to_csv('squares' + str(precision) + '.csv',index=False)
if return_squares == True:
return total_list
else:
print 'Wrote output squares tables to csv files.'
# given a list of geohashes returns a dataframe that can be
# sent into make blocks
def make_geohash_blocks(geohashs,**kwargs):
df = False
for key,value in kwargs.iteritems():
if key == 'df':
df = value
if df == True:
geohashs = geohashs.unstack(level=0).reset_index()[0].values.tolist()
header = ['GEOHASH','LAT1', 'LONG1', 'LAT2', 'LONG2', 'LAT3', 'LONG3', 'LAT4', 'LONG4','COUNT']
newlist = [header]
for row in geohashs:
if not row == '':
points = get_points_geohash(row)
newrow = [row] + points + [1]
newlist.append(newrow)
return list2df(newlist)
# given a table of points and geohashes returns the same table with each point's index position in its geohash
# from indices get decimal points
def ind_dec_points(alignmentdf):
# getting alignment df
header = alignmentdf.columns.values.tolist()
count =0
for row in header:
if 'lat' in row.lower():
latpos = count
elif 'long' in row.lower():
longpos = count
elif 'geohash' in row.lower():
hashpos = count
count += 1
xs = []
ys = []
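# for each point, compute its fractional position (0..1) within its geohash
# cell, measured from the cell's upper-left corner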
for row in alignmentdf.values.tolist():
lat = row[latpos]
long = row[longpos]
ghash = row[hashpos]
midlat,midlong,latdelta,longdelta = geohash.decode_exactly(ghash)
ulcornerpoint = [midlat + latdelta,midlong - longdelta]
latsize = latdelta * 2
longsize = longdelta * 2
x = abs(ulcornerpoint[1] - long) / longsize
y = abs(ulcornerpoint[0] - lat) / latsize
xs.append(x)
ys.append(y)
alignmentdf['x'] = xs
alignmentdf['y'] = ys
return alignmentdf
# mapped function for creating geohash center points
def make_geohash_point(ghash):
lat,long = geohash.decode(ghash)
return [long,lat]
# creates center point df from a unique geohash list
def points_from_geohash(geohashlist):
data = pd.DataFrame(geohashlist,columns=['GEOHASH'])
holder = data['GEOHASH'].apply(make_geohash_point)
data[['LONG','LAT']] = pd.DataFrame(holder.values.tolist(),columns=['LONG','LAT'])
return data
# creates center point df from a unique geohash list
def points_from_geohash4(geohashlist):
total = [['GEOHASH','LONG','LAT']]
for row in geohashlist:
y,x,yd,xd = geohash.decode_exactly(row)
pt1 = [row,x+xd,y+yd] # ne
pt2 = [row,x-xd,y-yd] # sw
pt3 = [row,x+xd,y-yd] # se
pt4 = [row,x-xd,y+yd] # nw
total += [pt1,pt2,pt3,pt4]
total = pd.DataFrame(total[1:],columns=total[0])
return total
# the second part of the actual geohashing process
# where the actual geohashing occurs
def geohash_linted(lats,lngs,precision):
newlist = []
ds = []
for i in range(0,len(lats)):
oi = (lats[i],lngs[i],precision)
#newlist.append(oi)
ds.append(geohash.encode(*oi))
#for i in range(0,len(pts)):
#ds.append(geohash.encode(*newlist[i]))
return ds
# lints points for non-hashable values (e.g. out-of-range latitudes)
def lint_values(data,latlongheaders):
if not latlongheaders == False:
lathead,longhead = latlongheaders
else:
for row in data.columns.values.tolist():
if 'lat' in str(row).lower():
lathead = row
elif 'long' in str(row).lower():
longhead = row
data = data[(data[lathead] < 90.0) & (data[lathead] > -90.0)]
data = data.fillna(value=0)
return data[lathead].astype(float).values.tolist(),data[longhead].values.tolist(),data
# performs both operations above
# may accept a kwarg to throw the output geohash into an area function letter
def geohash_points(data,precision,latlongheaders):
# selecting the point values that can be geohashed
#meaning anything under or above 90 to - 90
lats,longs,data = lint_values(data,latlongheaders)
data['GEOHASH'] = geohash_linted(lats,longs,precision)
return data
# given a number of points to generate,
# returns random lat and longs for testing etc.
# this function is encapsulated so that it's not easier to just geohash
# returns a df with fields LAT,LONG
def random_points(number):
os = []
for i in range(number):
o = ((random.random()*2 - 1.0)*90.0, (random.random()*2 - 1.0)*180.0 )
os.append(o)
return pd.DataFrame(os,columns = ['LAT','LONG'])
def latval(latitude):
if latitude > 0:
return (latitude / 90.0) / 2.0 + .5
else:
return .5 - abs((latitude / 90.0) / 2.0 )
def longval(longitude):
if longitude > 0:
return (longitude / 180.0) / 2.0 + .5
else:
return .5 - abs((longitude / 180.0) / 2.0 )
# given an extrema, retrieves random points within the extrema to be generated
def random_points_extrema(number,extrema):
os = []
latmax = extrema['n']
latmin = extrema['s']
longmin = extrema['w']
longmax = extrema['e']
decx1 = latval(latmin)
decx2 = latval(latmax)
decy1 = longval(longmin)
decy2 = longval(longmax)
minlat = 30.0
for i in range(number):
o = ((random.uniform(decx1,decx2)*2 - 1)*90, (random.uniform(decy1,decy2)*2 - 1.0)*180 )
os.append(o)
return pd.DataFrame(os,columns = ['LAT','LONG'])
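# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# Python 2 / legacy-pandas environment this module targets, since map_table
# relies on dict.iteritems() and DataFrame.sort().
if __name__ == '__main__':
    # generate random LAT/LONG points and add a GEOHASH column at precision 6
    demo_points = random_points(1000)
    demo_points = map_table(demo_points, 6, map_only=True)
    print demo_points.head()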
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for normalizing flows.
For a review of normalizing flows see: https://arxiv.org/abs/1912.02762
The abstract base class ConfigurableFlow demonstrates our minimal interface.
Although the standard change of variables formula requires that
normalizing flows are invertible, none of the algorithms in train.py
require evaluating that inverse explicitly so inverses are not implemented.
"""
import abc
from typing import Callable, List, Tuple
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
ConfigDict = tp.ConfigDict
class ConfigurableFlow(hk.Module, abc.ABC):
"""Abstract base clase for configurable normalizing flows.
This is the interface expected by all flow based algorithms called in train.py
"""
def __init__(self, config: ConfigDict):
super().__init__()
self._check_configuration(config)
self._config = config
def _check_input(self, x: Array) -> Array:
chex.assert_rank(x, 1)
def _check_outputs(self, x: Array, transformed_x: Array,
log_abs_det_jac: Array) -> Array:
chex.assert_rank(x, 1)
chex.assert_equal_shape([x, transformed_x])
chex.assert_shape(log_abs_det_jac, ())
def _check_members_types(self, config: ConfigDict, expected_members_types):
for elem, elem_type in expected_members_types:
if elem not in config:
raise ValueError('Flow config element not found: ', elem)
if not isinstance(config[elem], elem_type):
msg = 'Flow config element '+elem+' is not of type '+str(elem_type)
raise TypeError(msg)
def __call__(self, x: Array) -> Tuple[Array, Array]:
"""Call transform_and_log abs_det_jac with automatic shape checking.
This calls transform_and_log_abs_det_jac which needs to be implemented
in derived classes.
Args:
x: Array size (num_dim,) containing input to flow.
Returns:
Array size (num_dim,) containing output and Scalar log abs det Jacobian.
"""
self._check_input(x)
output, log_abs_det_jac = self.transform_and_log_abs_det_jac(x)
self._check_outputs(x, output, log_abs_det_jac)
return output, log_abs_det_jac
@abc.abstractmethod
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
"""Transform x through the flow and compute log abs determinant of Jacobian.
Args:
x: (num_dim,) input to the flow.
Returns:
Array size (num_dim,) containing output and Scalar log abs det Jacobian.
"""
@abc.abstractmethod
def _check_configuration(self, config: ConfigDict):
"""Check the configuration includes the necessary fields.
Will typically raise Assertion like errors.
Args:
config: A ConfigDict include the fields required by the flow.
"""
class DiagonalAffine(ConfigurableFlow):
"""An affine transformation with a positive diagonal matrix."""
def _check_configuration(self, unused_config: ConfigDict):
pass
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
num_elem = x.shape[0]
unconst_diag_init = hk.initializers.Constant(jnp.zeros((num_elem,)))
bias_init = hk.initializers.Constant(jnp.zeros((num_elem,)))
unconst_diag = hk.get_parameter('unconst_diag',
shape=[num_elem],
dtype=x.dtype,
init=unconst_diag_init)
bias = hk.get_parameter('bias',
shape=[num_elem],
dtype=x.dtype,
init=bias_init)
output = jnp.exp(unconst_diag)*x + bias
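# The Jacobian of this map is diagonal with entries exp(unconst_diag), so
# log|det J| = sum(unconst_diag).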
log_abs_det = jnp.sum(unconst_diag)
return output, log_abs_det
def rational_quadratic_spline(x: Array,
bin_positions: Array,
bin_heights: Array,
derivatives: Array) -> Tuple[Array, Array]:
"""Compute a rational quadratic spline.
See https://arxiv.org/abs/1906.04032
Args:
x: A single real number.
bin_positions: A sorted array of bin positions of length num_bins+1.
bin_heights: An array of bin heights of length num_bins+1.
derivatives: An array of derivatives at bin positions of length num_bins+1.
Returns:
Value of the rational quadratic spline at x.
Derivative with respect to x of rational quadratic spline at x.
"""
bin_index = jnp.searchsorted(bin_positions, x)
array_index = bin_index % len(bin_positions)
lower_x = bin_positions[array_index-1]
upper_x = bin_positions[array_index]
lower_y = bin_heights[array_index-1]
upper_y = bin_heights[array_index]
lower_deriv = derivatives[array_index-1]
upper_deriv = derivatives[array_index]
delta_x = upper_x - lower_x
delta_y = upper_y - lower_y
slope = delta_y / delta_x
alpha = (x - lower_x)/delta_x
alpha_squared = jnp.square(alpha)
beta = alpha * (1.-alpha)
gamma = jnp.square(1.-alpha)
epsilon = upper_deriv+lower_deriv -2. *slope
numerator_quadratic = delta_y * (slope*alpha_squared + lower_deriv*beta)
denominator_quadratic = slope + epsilon*beta
interp_x = lower_y + numerator_quadratic/denominator_quadratic
# now compute derivative
numerator_deriv = jnp.square(slope) * (
upper_deriv * alpha_squared + 2. * slope * beta + lower_deriv * gamma)
sqrt_denominator_deriv = slope + epsilon*beta
denominator_deriv = jnp.square(sqrt_denominator_deriv)
deriv = numerator_deriv / denominator_deriv
return interp_x, deriv
def identity_padded_rational_quadratic_spline(
x: Array, bin_positions: Array, bin_heights: Array,
derivatives: Array) -> Tuple[Array, Array]:
"""An identity padded rational quadratic spline.
Args:
x: the value to evaluate the spline at.
bin_positions: sorted values of bin x positions of length num_bins+1.
bin_heights: absolute height of bin of length num_bins-1.
derivatives: derivatives at internal bin edge of length num_bins-1.
Returns:
The value of the spline at x.
The derivative with respect to x of the spline at x.
"""
lower_limit = bin_positions[0]
upper_limit = bin_positions[-1]
bin_height_sequence = (jnp.atleast_1d(jnp.array(lower_limit)),
bin_heights,
jnp.atleast_1d(jnp.array(upper_limit)))
full_bin_heights = jnp.concatenate(bin_height_sequence)
derivative_sequence = (jnp.ones((1,)),
derivatives,
jnp.ones((1,)))
full_derivatives = jnp.concatenate(derivative_sequence)
in_range = jnp.logical_and(jnp.greater(x, lower_limit),
jnp.less(x, upper_limit))
multiplier = in_range*1.
multiplier_complement = jnp.logical_not(in_range)*1.
spline_val, spline_deriv = rational_quadratic_spline(x,
bin_positions,
full_bin_heights,
full_derivatives)
identity_val = x
identity_deriv = 1.
val = spline_val*multiplier + multiplier_complement*identity_val
deriv = spline_deriv*multiplier + multiplier_complement*identity_deriv
return val, deriv
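# Illustrative check (comment only): with bin_positions=[-1., 0., 1.],
# bin_heights=[0.2] and derivatives=[1.5], evaluating at x=3.0 falls outside
# the active region [-1, 1], so the function returns (3.0, 1.0): the identity
# value with unit derivative.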
class AutoregressiveMLP(hk.Module):
"""An MLP which is constrained to have autoregressive dependency."""
def __init__(self,
num_hiddens_per_input_dim: List[int],
include_self_links: bool,
non_linearity,
zero_final: bool,
bias_last: bool,
name=None):
super().__init__(name=name)
self._num_hiddens_per_input_dim = num_hiddens_per_input_dim
self._include_self_links = include_self_links
self._non_linearity = non_linearity
self._zero_final = zero_final
self._bias_last = bias_last
def __call__(self, x: Array) -> Array:
input_dim = x.shape[0]
hidden_representation = jnp.atleast_2d(x).T
prev_hid_per_dim = 1
num_hidden_layers = len(self._num_hiddens_per_input_dim)
final_index = num_hidden_layers-1
for layer_index in range(num_hidden_layers):
is_last_layer = (final_index == layer_index)
hid_per_dim = self._num_hiddens_per_input_dim[layer_index]
name_stub = '_'+str(layer_index)
layer_shape = (input_dim,
prev_hid_per_dim,
input_dim,
hid_per_dim)
in_degree = prev_hid_per_dim * input_dim
if is_last_layer and self._zero_final:
w_init = jnp.zeros
else:
w_init = hk.initializers.TruncatedNormal(1. / np.sqrt(in_degree))
bias_init = hk.initializers.Constant(jnp.zeros((input_dim, hid_per_dim,)))
weights = hk.get_parameter(name='weights'+name_stub,
shape=layer_shape,
dtype=x.dtype,
init=w_init)
if is_last_layer and not self._bias_last:
biases = jnp.zeros((input_dim, hid_per_dim,))
else:
biases = hk.get_parameter(name='biases'+name_stub,
shape=(input_dim, hid_per_dim),
dtype=x.dtype,
init=bias_init)
if not(self._include_self_links) and is_last_layer:
k = -1
else:
k = 0
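# Broadcasting the (input_dim, input_dim) triangular mask over the hidden and
# feature axes zeroes connections between dimension blocks outside the
# triangle; with k=-1 at the final layer the diagonal is dropped as well, so
# no output block depends on its own input dimension.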
mask = jnp.tril(jnp.ones((input_dim, input_dim)),
k=k)
masked_weights = mask[:, None, :, None] * weights
new_hidden_representation = jnp.einsum('ijkl,ij->kl',
masked_weights,
hidden_representation) + biases
prev_hid_per_dim = hid_per_dim
if not is_last_layer:
hidden_representation = self._non_linearity(new_hidden_representation)
else:
hidden_representation = new_hidden_representation
return hidden_representation
class InverseAutogressiveFlow(object):
"""A generic inverse autoregressive flow.
See https://arxiv.org/abs/1606.04934
Takes two functions as input.
1) autoregressive_func takes array of (num_dim,)
and returns array (num_dim, num_features)
it is autoregressive in the sense that the output[i, :]
depends only on the input[:i]. This is not checked.
2) transform_func takes array of (num_dim, num_features) and
an array of (num_dim,) and returns output of shape (num_dim,)
and a single log_det_jacobian value. This represents the transformation
acting on the inputs with given parameters.
"""
def __init__(self,
autoregressive_func: Callable[[Array], Array],
transform_func: Callable[[Array, Array], Tuple[Array, Array]]):
self._autoregressive_func = autoregressive_func
self._transform_func = transform_func
def __call__(self, x: Array) -> Tuple[Array, Array]:
"""x is of shape (num_dim,)."""
transform_features = self._autoregressive_func(x)
output, log_abs_det = self._transform_func(transform_features, x)
return output, log_abs_det
class SplineInverseAutoregressiveFlow(ConfigurableFlow):
"""An inverse autoregressive flow with spline transformer.
config must contain the following fields:
num_spline_bins: Number of bins for rational quadratic spline.
intermediate_hids_per_dim: See AutoregressiveMLP.
num_layers: Number of layers for AutoregressiveMLP.
identity_init: Whether to initialize the flow to the identity.
bias_last: Whether to include biases on the last layer of AutoregressiveMLP.
lower_lim: Lower limit of active region for rational quadratic spline.
upper_lim: Upper limit of active region for rational quadratic spline.
min_bin_size: Minimum bin size for rational quadratic spline.
min_derivative: Minimum derivative for rational quadratic spline.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
self._num_spline_bins = config.num_spline_bins
num_spline_parameters = 3 * config.num_spline_bins - 1
num_hids_per_input_dim = [config.intermediate_hids_per_dim
] * config.num_layers + [
num_spline_parameters
]
self._autoregressive_mlp = AutoregressiveMLP(
num_hids_per_input_dim,
include_self_links=False,
non_linearity=jax.nn.leaky_relu,
zero_final=config.identity_init,
bias_last=config.bias_last)
self._lower_lim = config.lower_lim
self._upper_lim = config.upper_lim
self._min_bin_size = config.min_bin_size
self._min_derivative = config.min_derivative
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('num_spline_bins', int),
('intermediate_hids_per_dim', int),
('num_layers', int),
('identity_init', bool),
('bias_last', bool),
('lower_lim', float),
('upper_lim', float),
('min_bin_size', float),
('min_derivative', float)
]
self._check_members_types(config, expected_members_types)
def _unpack_spline_params(self, raw_param_vec) -> Tuple[Array, Array, Array]:
unconst_bin_size_x = raw_param_vec[:self._num_spline_bins]
unconst_bin_size_y = raw_param_vec[self._num_spline_bins:2 *
self._num_spline_bins]
unconst_derivs = raw_param_vec[2 * self._num_spline_bins:(
3 * self._num_spline_bins - 1)]
return unconst_bin_size_x, unconst_bin_size_y, unconst_derivs
def _transform_raw_to_spline_params(
self, raw_param_vec: Array) -> Tuple[Array, Array, Array]:
unconst_bin_size_x, unconst_bin_size_y, unconst_derivs = self._unpack_spline_params(
raw_param_vec)
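# softmax makes the bin sizes positive and sum to the reduced range; adding
# min_bin_size back to every bin keeps each bin at least min_bin_size wide
# while the total still spans upper_lim - lower_lim.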
def normalize_bin_sizes(unconst_bin_sizes: Array) -> Array:
bin_range = self._upper_lim - self._lower_lim
reduced_bin_range = (
bin_range - self._num_spline_bins * self._min_bin_size)
return jax.nn.softmax(
unconst_bin_sizes) * reduced_bin_range + self._min_bin_size
bin_size_x = normalize_bin_sizes(unconst_bin_size_x)
bin_size_y = normalize_bin_sizes(unconst_bin_size_y)
# get the x bin positions.
array_sequence = (jnp.ones((1,))*self._lower_lim, bin_size_x)
x_bin_pos = jnp.cumsum(jnp.concatenate(array_sequence))
# get the y bin positions, ignoring redundant terms.
stripped_y_bin_pos = self._lower_lim + jnp.cumsum(bin_size_y[:-1])
def forward_positive_transform(unconst_value: Array,
min_value: Array) -> Array:
return jax.nn.softplus(unconst_value) + min_value
def inverse_positive_transform(const_value: Array,
min_value: Array) -> Array:
return jnp.log(jnp.expm1(const_value-min_value))
inverted_one = inverse_positive_transform(1., self._min_derivative)
derivatives = forward_positive_transform(unconst_derivs + inverted_one,
self._min_derivative)
return x_bin_pos, stripped_y_bin_pos, derivatives
def _get_spline_values(self,
raw_parameters: Array,
x: Array) -> Tuple[Array, Array]:
bat_get_parameters = jax.vmap(self._transform_raw_to_spline_params)
bat_x_bin_pos, bat_stripped_y, bat_derivatives = bat_get_parameters(
raw_parameters)
# Vectorize spline over data and parameters.
bat_get_spline_vals = jax.vmap(identity_padded_rational_quadratic_spline,
in_axes=[0, 0, 0, 0])
spline_vals, derivs = bat_get_spline_vals(x, bat_x_bin_pos, bat_stripped_y,
bat_derivatives)
log_abs_det = jnp.sum(jnp.log(jnp.abs(derivs)))
return spline_vals, log_abs_det
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
iaf = InverseAutogressiveFlow(self._autoregressive_mlp,
self._get_spline_values)
return iaf(x)
class AffineInverseAutoregressiveFlow(ConfigurableFlow):
"""An inverse autoregressive flow with affine transformer.
config must contain the following fields:
intermediate_hids_per_dim: See AutoregressiveMLP.
num_layers: Number of layers for AutoregressiveMLP.
identity_init: Whether to initialize the flow to the identity.
bias_last: Whether to include biases on the last layer of AutoregressiveMLP.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
num_affine_params = 2
num_hids_per_input_dim = [config.intermediate_hids_per_dim
] * config.num_layers + [num_affine_params]
self._autoregressive_mlp = AutoregressiveMLP(
num_hids_per_input_dim,
include_self_links=False,
non_linearity=jax.nn.leaky_relu,
zero_final=config.identity_init,
bias_last=config.bias_last)
def _check_configuration(self, config: ConfigDict):
expected_members_types = [('intermediate_hids_per_dim', int),
('num_layers', int),
('identity_init', bool),
('bias_last', bool)
]
self._check_members_types(config, expected_members_types)
def _get_affine_transformation(self,
raw_parameters: Array,
x: Array) -> Tuple[Array, Array]:
shifts = raw_parameters[:, 0]
scales = raw_parameters[:, 1] + jnp.ones_like(raw_parameters[:, 1])
log_abs_det = jnp.sum(jnp.log(jnp.abs(scales)))
output = x * scales + shifts
return output, log_abs_det
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
iaf = InverseAutogressiveFlow(self._autoregressive_mlp,
self._get_affine_transformation)
return iaf(x)
def affine_transformation(params: Array,
x: Array) -> Tuple[Array, Array]:
shift = params[0]
# Assuming params start as zero adding 1 to scale gives identity transform.
scale = params[1] + 1.
output = x * scale + shift
return output, jnp.log(jnp.abs(scale))
class RationalQuadraticSpline(ConfigurableFlow):
"""A learnt monotonic rational quadratic spline with identity padding.
Each input dimension is operated on by a separate spline.
The spline is initialized to the identity.
config must contain the following fields:
num_bins: Number of bins for rational quadratic spline.
lower_lim: Lower limit of active region for rational quadratic spline.
upper_lim: Upper limit of active region for rational quadratic spline.
min_bin_size: Minimum bin size for rational quadratic spline.
min_derivative: Minimum derivative for rational quadratic spline.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
self._num_bins = config.num_bins
self._lower_lim = config.lower_lim
self._upper_lim = config.upper_lim
self._min_bin_size = config.min_bin_size
self._min_derivative = config.min_derivative
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('num_bins', int),
('lower_lim', float),
('upper_lim', float),
('min_bin_size', float),
('min_derivative', float)
]
self._check_members_types(config, expected_members_types)
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
"""Apply the spline transformation.
Args:
x: (num_dim,) DeviceArray representing flow input.
Returns:
output: (num_dim,) transformed sample through flow.
log_abs_det_jac: Scalar log absolute determinant of the Jacobian at x.
"""
num_dim = x.shape[0]
bin_parameter_shape = (num_dim, self._num_bins)
# Setup the bin position and height parameters.
bin_init = hk.initializers.Constant(jnp.ones(bin_parameter_shape))
unconst_bin_size_x = hk.get_parameter(
'unconst_bin_size_x',
shape=bin_parameter_shape,
dtype=x.dtype,
init=bin_init)
unconst_bin_size_y = hk.get_parameter(
'unconst_bin_size_y',
shape=bin_parameter_shape,
dtype=x.dtype,
init=bin_init)
def normalize_bin_sizes(unconst_bin_sizes):
bin_range = self._upper_lim - self._lower_lim
reduced_bin_range = (bin_range - self._num_bins * self._min_bin_size)
return jax.nn.softmax(
unconst_bin_sizes) * reduced_bin_range + self._min_bin_size
batched_normalize = jax.vmap(normalize_bin_sizes)
bin_size_x = batched_normalize(unconst_bin_size_x)
bin_size_y = batched_normalize(unconst_bin_size_y)
array_sequence = (jnp.ones((num_dim, 1)) * self._lower_lim, bin_size_x)
bin_positions = jnp.cumsum(jnp.concatenate(array_sequence, axis=1), axis=1)
# Don't include the redundant bin heights.
stripped_bin_heights = self._lower_lim + jnp.cumsum(
bin_size_y[:, :-1], axis=1)
# Setup the derivative parameters.
def forward_positive_transform(unconst_value, min_value):
return jax.nn.softplus(unconst_value) + min_value
def inverse_positive_transform(const_value, min_value):
return jnp.log(jnp.expm1(const_value - min_value))
deriv_parameter_shape = (num_dim, self._num_bins - 1)
inverted_one = inverse_positive_transform(1., self._min_derivative)
deriv_init = hk.initializers.Constant(
jnp.ones(deriv_parameter_shape) * inverted_one)
unconst_deriv = hk.get_parameter(
'unconst_deriv',
shape=deriv_parameter_shape,
dtype=x.dtype,
init=deriv_init)
batched_positive_transform = jax.vmap(
forward_positive_transform, in_axes=[0, None])
deriv = batched_positive_transform(unconst_deriv, self._min_derivative)
# Setup batching then apply the spline.
batch_padded_rq_spline = jax.vmap(
identity_padded_rational_quadratic_spline, in_axes=[0, 0, 0, 0])
output, jac_terms = batch_padded_rq_spline(x, bin_positions,
stripped_bin_heights, deriv)
log_abs_det_jac = jnp.sum(jnp.log(jac_terms))
return output, log_abs_det_jac
class ComposedFlows(ConfigurableFlow):
"""Class to compose flows based on a list of configs.
config should contain flow_configs a list of flow configs to compose.
"""
def __init__(self, config: ConfigDict):
super().__init__(config)
self._flows = []
for flow_config in self._config.flow_configs:
base_flow_class = globals()[flow_config.type]
flow = base_flow_class(flow_config)
self._flows.append(flow)
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('flow_configs', list),
]
self._check_members_types(config, expected_members_types)
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
log_abs_det = 0.
progress = x
for flow in self._flows:
progress, log_abs_det_increment = flow(progress)
log_abs_det += log_abs_det_increment
return progress, log_abs_det
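# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original library). It assumes that
# ConfigDict() can be constructed empty (e.g. an ml_collections ConfigDict);
# DiagonalAffine ignores its config, so any config object would do here.
if __name__ == '__main__':
  def forward(x):
    return DiagonalAffine(ConfigDict())(x)

  forward_fn = hk.transform(forward)
  rng_key = jax.random.PRNGKey(0)
  x_in = jnp.linspace(-1., 1., 5)
  params = forward_fn.init(rng_key, x_in)
  y_out, log_det = forward_fn.apply(params, rng_key, x_in)
  # At initialization the flow is the identity, so y_out == x_in and
  # log_det == 0.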
|
|
"""The tests for the Xiaomi router device tracker platform."""
import logging
import requests
from homeassistant.components.device_tracker import DOMAIN
import homeassistant.components.xiaomi.device_tracker as xiaomi
from homeassistant.components.xiaomi.device_tracker import get_scanner
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PLATFORM, CONF_USERNAME
from tests.async_mock import MagicMock, call, patch
_LOGGER = logging.getLogger(__name__)
INVALID_USERNAME = "bob"
TOKEN_TIMEOUT_USERNAME = "tok"
URL_AUTHORIZE = "http://192.168.0.1/cgi-bin/luci/api/xqsystem/login"
URL_LIST_END = "api/misystem/devicelist"
FIRST_CALL = True
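# FIRST_CALL lets the expired-token fixture fail exactly once: the first
# device-list request made with the "timedOut" token returns a 401 payload,
# and the retry after re-authentication succeeds.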
def mocked_requests(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
@property
def content(self):
"""Return the content of the response."""
return self.json()
def raise_for_status(self):
"""Raise an HTTPError if status is not 200."""
if self.status_code != 200:
raise requests.HTTPError(self.status_code)
data = kwargs.get("data")
global FIRST_CALL
if data and data.get("username", None) == INVALID_USERNAME:
# deliver an invalid token
return MockResponse({"code": "401", "msg": "Invalid token"}, 200)
if data and data.get("username", None) == TOKEN_TIMEOUT_USERNAME:
# deliver an expired token
return MockResponse(
{
"url": "/cgi-bin/luci/;stok=ef5860/web/home",
"token": "timedOut",
"code": "0",
},
200,
)
if str(args[0]).startswith(URL_AUTHORIZE):
# deliver an authorized token
return MockResponse(
{
"url": "/cgi-bin/luci/;stok=ef5860/web/home",
"token": "ef5860",
"code": "0",
},
200,
)
if str(args[0]).endswith(f"timedOut/{URL_LIST_END}") and FIRST_CALL is True:
FIRST_CALL = False
# deliver an error when called with expired token
return MockResponse({"code": "401", "msg": "Invalid token"}, 200)
if str(args[0]).endswith(URL_LIST_END):
# deliver the device list
return MockResponse(
{
"mac": "1C:98:EC:0E:D5:A4",
"list": [
{
"mac": "23:83:BF:F6:38:A0",
"oname": "12255ff",
"isap": 0,
"parent": "",
"authority": {"wan": 1, "pridisk": 0, "admin": 1, "lan": 0},
"push": 0,
"online": 1,
"name": "Device1",
"times": 0,
"ip": [
{
"downspeed": "0",
"online": "496957",
"active": 1,
"upspeed": "0",
"ip": "192.168.0.25",
}
],
"statistics": {
"downspeed": "0",
"online": "496957",
"upspeed": "0",
},
"icon": "",
"type": 1,
},
{
"mac": "1D:98:EC:5E:D5:A6",
"oname": "CdddFG58",
"isap": 0,
"parent": "",
"authority": {"wan": 1, "pridisk": 0, "admin": 1, "lan": 0},
"push": 0,
"online": 1,
"name": "Device2",
"times": 0,
"ip": [
{
"downspeed": "0",
"online": "347325",
"active": 1,
"upspeed": "0",
"ip": "192.168.0.3",
}
],
"statistics": {
"downspeed": "0",
"online": "347325",
"upspeed": "0",
},
"icon": "",
"type": 0,
},
],
"code": 0,
},
200,
)
_LOGGER.debug("UNKNOWN ROUTE")
@patch(
"homeassistant.components.xiaomi.device_tracker.XiaomiDeviceScanner",
return_value=MagicMock(),
)
async def test_config(xiaomi_mock, hass):
"""Testing minimal configuration."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_PASSWORD: "passwordTest",
}
)
}
xiaomi.get_scanner(hass, config)
assert xiaomi_mock.call_count == 1
assert xiaomi_mock.call_args == call(config[DOMAIN])
call_arg = xiaomi_mock.call_args[0][0]
assert call_arg["username"] == "admin"
assert call_arg["password"] == "passwordTest"
assert call_arg["host"] == "192.168.0.1"
assert call_arg["platform"] == "device_tracker"
@patch(
"homeassistant.components.xiaomi.device_tracker.XiaomiDeviceScanner",
return_value=MagicMock(),
)
async def test_config_full(xiaomi_mock, hass):
"""Testing full configuration."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: "alternativeAdminName",
CONF_PASSWORD: "passwordTest",
}
)
}
xiaomi.get_scanner(hass, config)
assert xiaomi_mock.call_count == 1
assert xiaomi_mock.call_args == call(config[DOMAIN])
call_arg = xiaomi_mock.call_args[0][0]
assert call_arg["username"] == "alternativeAdminName"
assert call_arg["password"] == "passwordTest"
assert call_arg["host"] == "192.168.0.1"
assert call_arg["platform"] == "device_tracker"
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_invalid_credential(mock_get, mock_post, hass):
"""Testing invalid credential handling."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: INVALID_USERNAME,
CONF_PASSWORD: "passwordTest",
}
)
}
assert get_scanner(hass, config) is None
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_valid_credential(mock_get, mock_post, hass):
"""Testing valid refresh."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "passwordTest",
}
)
}
scanner = get_scanner(hass, config)
assert scanner is not None
assert 2 == len(scanner.scan_devices())
assert "Device1" == scanner.get_device_name("23:83:BF:F6:38:A0")
assert "Device2" == scanner.get_device_name("1D:98:EC:5E:D5:A6")
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_token_timed_out(mock_get, mock_post, hass):
"""Testing refresh with a timed out token.
New token is requested and list is downloaded a second time.
"""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: TOKEN_TIMEOUT_USERNAME,
CONF_PASSWORD: "passwordTest",
}
)
}
scanner = get_scanner(hass, config)
assert scanner is not None
assert 2 == len(scanner.scan_devices())
assert "Device1" == scanner.get_device_name("23:83:BF:F6:38:A0")
assert "Device2" == scanner.get_device_name("1D:98:EC:5E:D5:A6")
|
|
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Mikolaj Magnuski <mmagnuski@swps.edu.pl>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
from copy import deepcopy
from distutils.version import LooseVersion
import os.path as op
import shutil
from unittest import SkipTest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_allclose)
import pytest
from scipy import io
from mne import write_events, read_epochs_eeglab
from mne.io import read_raw_eeglab
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets import testing
from mne.utils import run_tests_if_main, requires_h5py
from mne.annotations import events_from_annotations, read_annotations
base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
raw_fname_mat = op.join(base_dir, 'test_raw.set')
raw_fname_onefile_mat = op.join(base_dir, 'test_raw_onefile.set')
raw_fname_event_duration = op.join(base_dir, 'test_raw_event_duration.set')
epochs_fname_mat = op.join(base_dir, 'test_epochs.set')
epochs_fname_onefile_mat = op.join(base_dir, 'test_epochs_onefile.set')
raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat]
epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat]
raw_fname_h5 = op.join(base_dir, 'test_raw_h5.set')
raw_fname_onefile_h5 = op.join(base_dir, 'test_raw_onefile_h5.set')
epochs_fname_h5 = op.join(base_dir, 'test_epochs_h5.set')
epochs_fname_onefile_h5 = op.join(base_dir, 'test_epochs_onefile_h5.set')
raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5]
epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5]
raw_fnames = [raw_fname_mat, raw_fname_onefile_mat,
raw_fname_h5, raw_fname_onefile_h5]
montage = op.join(base_dir, 'test_chans.locs')
def _check_h5(fname):
if fname.endswith('_h5.set'):
try:
import h5py # noqa, analysis:ignore
except Exception:
raise SkipTest('h5py module required')
@requires_h5py
@testing.requires_testing_data
@pytest.mark.parametrize('fnames', [raw_mat_fnames, raw_h5_fnames])
def test_io_set_raw(fnames, tmpdir):
"""Test importing EEGLAB .set files."""
tmpdir = str(tmpdir)
raw_fname, raw_fname_onefile = fnames
_test_raw_reader(read_raw_eeglab, input_fname=raw_fname,
montage=montage)
# test that preloading works
raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
preload=True)
raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto',
phase='zero')
# test that using uint16_codec does not break stuff
raw0 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
preload=False, uint16_codec='ascii')
# test reading file with one event (read old version)
eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
squeeze_me=True)['EEG']
# test negative event latencies
negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
evnts = deepcopy(eeg.event[0])
evnts.latency = 0
io.savemat(negative_latency_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan,
'data': 'test_negative_latency.fdt',
'epoch': eeg.epoch, 'event': evnts,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
negative_latency_fname.replace('.set', '.fdt'))
with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
read_raw_eeglab(input_fname=negative_latency_fname, preload=True,
montage=montage)
evnts.latency = -1
io.savemat(negative_latency_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan,
'data': 'test_negative_latency.fdt',
'epoch': eeg.epoch, 'event': evnts,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
with pytest.raises(ValueError, match='event sample index is negative'):
with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
read_raw_eeglab(input_fname=negative_latency_fname, preload=True,
montage=montage)
# test overlapping events
overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
io.savemat(overlap_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': 'test_overlap_event.fdt',
'epoch': eeg.epoch,
'event': [eeg.event[0], eeg.event[0]],
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
overlap_fname.replace('.set', '.fdt'))
# test reading file with one channel
one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
io.savemat(one_chan_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': 1, 'data': np.random.random((1, 3)),
'epoch': eeg.epoch, 'event': eeg.epoch,
'chanlocs': {'labels': 'E1', 'Y': -6.6069,
'X': 6.3023, 'Z': -2.9423},
'times': eeg.times[:3], 'pnts': 3}},
appendmat=False, oned_as='row')
read_raw_eeglab(input_fname=one_chan_fname, preload=True)
# test reading file with 3 channels - one without position information
# first, create chanlocs structured array
ch_names = ['F3', 'unknown', 'FPz']
x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
nopos_dt = [('labels', 'S10'), ('Z', 'f8')]
chanlocs = np.zeros((3,), dtype=dt)
nopos_chanlocs = np.zeros((3,), dtype=nopos_dt)
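# fill the structured arrays: chanlocs gets all four fields, while
# nopos_chanlocs only gets labels (fld 0) and Z (fld 3), simulating a
# chanlocs struct without X/Y position information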
for ind, vals in enumerate(zip(ch_names, x, y, z)):
for fld in range(4):
chanlocs[ind][dt[fld][0]] = vals[fld]
if fld in (0, 3):
nopos_chanlocs[ind][dt[fld][0]] = vals[fld]
# In theory this should work and be simpler, but there is an obscure
# SciPy writing bug that pops up sometimes:
# nopos_chanlocs = np.array(chanlocs[['labels', 'Z']])
if LooseVersion(np.__version__) == '1.14.0':
# There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
# this write to fail!
raise SkipTest('Need to fix bug in NumPy 1.14.0!')
# save set file
one_chanpos_fname = op.join(tmpdir, 'test_chanpos.set')
io.savemat(one_chanpos_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': 3, 'data': np.random.random((3, 3)),
'epoch': eeg.epoch, 'event': eeg.epoch,
'chanlocs': chanlocs, 'times': eeg.times[:3],
'pnts': 3}},
appendmat=False, oned_as='row')
# load it
with pytest.warns(RuntimeWarning, match='did not have a position'):
raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
# position should be present for first two channels
for i in range(2):
assert_array_equal(raw.info['chs'][i]['loc'][:3],
np.array([-chanlocs[i]['Y'],
chanlocs[i]['X'],
chanlocs[i]['Z']]))
# position of the last channel should be unknown (NaN)
assert_array_equal(raw.info['chs'][-1]['loc'][:3], [np.nan] * 3)
# test reading channel names from set and positions from montage
with pytest.warns(RuntimeWarning, match='did not have a position'):
raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True,
montage=montage)
# when montage was passed - channel positions should be taken from there
correct_pos = [[-0.56705965, 0.67706631, 0.46906776], [np.nan] * 3,
[0., 0.99977915, -0.02101571]]
for ch_ind in range(3):
assert_array_almost_equal(raw.info['chs'][ch_ind]['loc'][:3],
np.array(correct_pos[ch_ind]))
# test reading channel names but not positions when there is no X (only Z)
# field in the EEG.chanlocs structure
nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
io.savemat(nopos_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3,
'data': np.random.random((3, 2)), 'epoch': eeg.epoch,
'event': eeg.epoch, 'chanlocs': nopos_chanlocs,
'times': eeg.times[:2], 'pnts': 2}},
appendmat=False, oned_as='row')
# load the file
raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
# test that channel names have been loaded but not channel positions
for i in range(3):
assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
assert_array_equal(raw.info['chs'][i]['loc'][:3],
np.array([np.nan, np.nan, np.nan]))
@pytest.mark.timeout(60) # ~60 sec on Travis OSX
@requires_h5py
@testing.requires_testing_data
@pytest.mark.parametrize('fnames', [epochs_mat_fnames, epochs_h5_fnames])
def test_io_set_epochs(fnames):
"""Test importing EEGLAB .set epochs files."""
epochs_fname, epochs_fname_onefile = fnames
with pytest.warns(RuntimeWarning, match='multiple events'):
epochs = read_epochs_eeglab(epochs_fname)
with pytest.warns(RuntimeWarning, match='multiple events'):
epochs2 = read_epochs_eeglab(epochs_fname_onefile)
# one warning for each read_epochs_eeglab because both files have epochs
# associated with multiple events
assert_array_equal(epochs.get_data(), epochs2.get_data())
@testing.requires_testing_data
def test_io_set_epochs_events(tmpdir):
"""Test different combinations of events and event_ids."""
tmpdir = str(tmpdir)
out_fname = op.join(tmpdir, 'test-eve.fif')
events = np.array([[4, 0, 1], [12, 0, 2], [20, 0, 3], [26, 0, 3]])
write_events(out_fname, events)
event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3}
out_fname = op.join(tmpdir, 'test-eve.fif')
epochs = read_epochs_eeglab(epochs_fname_mat, events, event_id)
assert_equal(len(epochs.events), 4)
assert epochs.preload
assert epochs._bad_dropped
epochs = read_epochs_eeglab(epochs_fname_mat, out_fname, event_id)
pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
None, event_id)
pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
epochs.events, None)
@testing.requires_testing_data
def test_degenerate(tmpdir):
"""Test some degenerate conditions."""
# test if .dat file raises an error
tmpdir = str(tmpdir)
eeg = io.loadmat(epochs_fname_mat, struct_as_record=False,
squeeze_me=True)['EEG']
eeg.data = 'epochs_fname.dat'
bad_epochs_fname = op.join(tmpdir, 'test_epochs.set')
io.savemat(bad_epochs_fname,
{'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
'nbchan': eeg.nbchan, 'data': eeg.data,
'epoch': eeg.epoch, 'event': eeg.event,
'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
appendmat=False, oned_as='row')
shutil.copyfile(op.join(base_dir, 'test_epochs.fdt'),
op.join(tmpdir, 'test_epochs.dat'))
with pytest.warns(RuntimeWarning, match='multiple events'):
pytest.raises(NotImplementedError, read_epochs_eeglab,
bad_epochs_fname)
@pytest.mark.parametrize("fname", raw_fnames)
@testing.requires_testing_data
def test_eeglab_annotations(fname):
"""Test reading annotations in EEGLAB files."""
_check_h5(fname)
annotations = read_annotations(fname)
assert len(annotations) == 154
assert set(annotations.description) == {'rt', 'square'}
assert np.all(annotations.duration == 0.)
@testing.requires_testing_data
def test_eeglab_read_annotations():
"""Test annotations onsets are timestamps (+ validate some)."""
annotations = read_annotations(raw_fname_mat)
validation_samples = [0, 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
expected_onset = np.array([1.00, 1.69, 2.08, 4.70, 7.71, 11.30, 17.18,
20.20, 26.12, 29.14, 35.25, 44.30, 47.15])
assert annotations.orig_time is None
assert_array_almost_equal(annotations.onset[validation_samples],
expected_onset, decimal=2)
# test if event durations are imported correctly
raw = read_raw_eeglab(raw_fname_event_duration, preload=True)
# file contains 3 annotations with 0.5 s (64 samples) duration each
assert_allclose(raw.annotations.duration, np.ones(3) * 0.5)
@testing.requires_testing_data
def test_eeglab_event_from_annot():
"""Test all forms of obtaining annotations."""
base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
raw_fname_mat = op.join(base_dir, 'test_raw.set')
raw_fname = raw_fname_mat
montage = op.join(base_dir, 'test_chans.locs')
event_id = {'rt': 1, 'square': 2}
raw1 = read_raw_eeglab(input_fname=raw_fname, montage=montage,
preload=False)
annotations = read_annotations(raw_fname)
assert len(raw1.annotations) == 154
raw1.set_annotations(annotations)
events_b, _ = events_from_annotations(raw1, event_id=event_id)
assert len(events_b) == 154
run_tests_if_main()
|
|
#!/usr/bin/python
# Copyright (c) 2015, Cloudera, inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Deploys a new Impala_Kudu service, either based on an existing Impala service
# or from scratch.
#
# Prerequisites:
# - A cluster running CDH 5.4.x and Cloudera Manager 5.4.x with x >= 7
# - CM API Python client (http://cloudera.github.io/cm_api/docs/python-client).
#
# Sample usage:
#
# ./deploy.py clone IMPALA_KUDU IMPALA-1
# Clones IMPALA-1 into a new Impala_Kudu service called "IMPALA_KUDU".
#
# ./deploy.py create new_service /data/impala/
# Creates a new Impala_Kudu service called "new_service" using /data/impala/
# for its scratch directories.
import argparse
import hashlib
import os
import re
import time
from cm_api.api_client import ApiResource
IMPALA_KUDU_PARCEL_URL = os.getenv("IMPALA_KUDU_PARCEL_URL",
"http://archive.cloudera.com/beta/impala-kudu/parcels/latest")
IMPALA_KUDU_PARCEL_PRODUCT = "IMPALA_KUDU"
MAX_PARCEL_REPO_WAIT_SECS = 60
MAX_PARCEL_WAIT_SECS = 60 * 30
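# Maps each service type to whether the new Impala_Kudu service requires it as
# a dependency (True = required, False = optional); see find_dependencies().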
SERVICE_DEPENDENCIES = {
"HDFS" : True,
"HIVE" : True,
"YARN" : False,
"HBASE" : False,
"SENTRY" : False,
"ZOOKEEPER" : False
}
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--host", type=str,
default="localhost",
help="Hostname of the Cloudera Manager server.")
parser.add_argument("--user", type=str,
default="admin",
help="Username with which to log into Cloudera Manager.")
parser.add_argument("--password", type=str,
default="admin",
help="Password with which to log into Cloudera Manager.")
parents_parser = argparse.ArgumentParser(add_help=False)
parents_parser.add_argument("--cluster", type=str,
help="Name of existing cluster where the Impala_Kudu service "
"should be added. If not specified, uses the only cluster or "
"raises an exception if multiple clusters are found.")
parents_parser.add_argument("service_name", type=str,
help="Name of Impala_Kudu service to create.")
subparsers = parser.add_subparsers(dest="subparsers_name")
clone_parser = subparsers.add_parser("clone",
parents=[parents_parser],
help="Use an existing Impala service as a template for "
"the new Impala_Kudu service. To be used when Impala_"
"Kudu is to run side-by-side with an existing Impala.")
clone_parser.add_argument("based_on", type=str,
help="Name of existing Impala service to clone as the basis for the "
"new service.")
create_parser = subparsers.add_parser("create",
parents=[parents_parser],
help="create a new Impala_Kudu service from scratch. To "
"be used when Impala_Kudu runs in its own cluster.")
create_parser.add_argument("--master_host", type=str,
help="Hostname where new Impala_Kudu service's master roles should "
"be placed. If not specified, uses the Cloudera Manager Server host "
"or raises an exception if that host is not managed.")
for service_type, required in SERVICE_DEPENDENCIES.iteritems():
create_parser.add_argument("--%s_dependency" % (service_type.lower(),),
type=str,
help="Name of %s service that the new Impala_Kudu service "
"should depend on. If not specified, will use only service of "
"that type in the cluster. Will raise an exception if exactly "
"one instance of that service is not found in the cluster. %s" %
(service_type, "REQUIRED." if required else ""))
create_parser.add_argument("scratch_dirs", type=str,
help="Comma-separated list of scratch directories to use in the new "
"Impala_Kudu service.")
return parser.parse_args()
def find_cluster(api, cluster_name):
all_clusters = api.get_all_clusters()
if not cluster_name and len(all_clusters) > 1:
raise Exception("Cannot use implicit cluster; there is more than one available")
for cluster in all_clusters:
if (cluster_name and cluster.name == cluster_name) or not cluster_name:
print "Found cluster: %s" % (cluster.name,)
return cluster
if cluster_name:
message = "Cannot find cluster: %s" % (cluster_name,)
else:
message = "Cannot find implicit cluster"
raise Exception(message)
def find_dependencies(args, cluster):
deps = []
# { service type : { service name : service }}
services_by_type = {}
for service in cluster.get_all_services():
service_dict = services_by_type.get(service.type, {})
service_dict[service.name] = service
services_by_type[service.type] = service_dict
for service_type, required in SERVICE_DEPENDENCIES.iteritems():
candidates = services_by_type.get(service_type, {})
arg = getattr(args, service_type.lower() + "_dependency")
if arg:
found = candidates.get(arg, None)
if not found:
raise Exception("Could not find dependency service (type %s, name %s)" %
(service_type, arg))
print "Found explicit dependency service %s" % (found.name)
deps.append(found)
else:
if not required:
print "Skipping optional dependency of type %s" % (service_type,)
continue
if len(candidates) > 1:
raise Exception("Found %d possible implicit dependency services of type %s" %
(len(candidates), service_type))
elif len(candidates) == 0:
raise Exception("Could not find implicit dependency service of type %s" %
(service_type,))
else:
found = candidates.values()[0]
print "Found implicit dependency service %s" % (found.name,)
deps.append(found)
return deps
def check_new_service_does_not_exist(api, cluster, new_name):
for service in cluster.get_all_services():
if service.displayName == new_name:
raise Exception("New service name %s already in use" % (new_name,))
print "New service name %s is not in use" % (new_name,)
def find_template_service(api, cluster, based_on):
template_service = None
for service in cluster.get_all_services():
if based_on and service.displayName == based_on:
if service.type != "IMPALA":
raise Exception("Based-on service %s is of wrong type %s" %
(based_on, service.type))
print "Found based-on service: %s" % (based_on,)
template_service = service
if based_on and not template_service:
raise Exception("Could not find based-on service: %s" % (based_on,))
return template_service
def find_master_host(api, cm_hostname, master_hostname):
for h in api.get_all_hosts():
if master_hostname and h.hostname == master_hostname:
print "Found master host %s" % (master_hostname,)
return h
elif not master_hostname and h.hostname == cm_hostname:
print "Found implicit master host on CM host %s" % (cm_hostname,)
return h
if master_hostname:
raise Exception("Could not find master host with hostname %s" % (master_hostname,))
else:
raise Exception("Could not find implicit master host %s" % (cm_hostname,))
def get_best_parcel(api, cluster):
parcels_available_remotely = []
parcels_downloaded = []
parcels_distributed = []
parcels_activated = []
for parcel in cluster.get_all_parcels():
if parcel.product == IMPALA_KUDU_PARCEL_PRODUCT:
if parcel.stage == "AVAILABLE_REMOTELY":
parcels_available_remotely.append(parcel)
elif parcel.stage == "DOWNLOADED":
parcels_downloaded.append(parcel)
elif parcel.stage == "DISTRIBUTED":
parcels_distributed.append(parcel)
elif parcel.stage == "ACTIVATED":
parcels_activated.append(parcel)
def parcel_cmp(p1, p2):
if p1.version < p2.version:
return -1
elif p1.version > p2.version:
return 1
else:
return 0
# Prefer the "closest" parcel, even if it's not the newest by version.
if len(parcels_activated) > 0:
parcel = sorted(parcels_activated, key=lambda parcel: parcel.version)[0]
elif len(parcels_distributed) > 0:
parcel = sorted(parcels_distributed, key=lambda parcel: parcel.version)[0]
elif len(parcels_downloaded) > 0:
parcel = sorted(parcels_downloaded, key=lambda parcel: parcel.version)[0]
elif len(parcels_available_remotely) > 0:
parcel = sorted(parcels_available_remotely, key=lambda parcel: parcel.version)[0]
else:
parcel = None
if parcel:
print "Chose best parcel %s-%s (stage %s)" % (parcel.product,
parcel.version,
parcel.stage)
else:
print "Found no candidate parcels"
return parcel
def ensure_parcel_repo_added(api):
cm = api.get_cloudera_manager()
config = cm.get_config(view='summary')
parcel_urls = config.get("REMOTE_PARCEL_REPO_URLS", "").split(",")
if IMPALA_KUDU_PARCEL_URL in parcel_urls:
print "Impala_Kudu parcel URL already present"
else:
print "Adding Impala_Kudu parcel URL"
parcel_urls.append(IMPALA_KUDU_PARCEL_URL)
config["REMOTE_PARCEL_REPO_URLS"] = ",".join(parcel_urls)
cm.update_config(config)
def wait_for_parcel_stage(cluster, parcel, stage):
for attempt in xrange(1, MAX_PARCEL_WAIT_SECS + 1):
new_parcel = cluster.get_parcel(parcel.product, parcel.version)
if new_parcel.stage == stage:
return
if new_parcel.state.errors:
raise Exception(str(new_parcel.state.errors))
print "progress: %s / %s" % (new_parcel.state.progress,
new_parcel.state.totalProgress)
time.sleep(1)
else:
raise Exception("Parcel %s-%s did not reach stage %s in %d seconds" %
(parcel.product, parcel.version, stage, MAX_PARCEL_WAIT_SECS,))
def ensure_parcel_activated(cluster, parcel):
parcel_stage = parcel.stage
if parcel_stage == "AVAILABLE_REMOTELY":
print "Downloading parcel: %s-%s " % (parcel.product, parcel.version)
parcel.start_download()
wait_for_parcel_stage(cluster, parcel, "DOWNLOADED")
print "Downloaded parcel: %s-%s " % (parcel.product, parcel.version)
parcel_stage = "DOWNLOADED"
if parcel_stage == "DOWNLOADED":
print "Distributing parcel: %s-%s " % (parcel.product, parcel.version)
parcel.start_distribution()
wait_for_parcel_stage(cluster, parcel, "DISTRIBUTED")
print "Distributed parcel: %s-%s " % (parcel.product, parcel.version)
parcel_stage = "DISTRIBUTED"
if parcel_stage == "DISTRIBUTED":
print "Activating parcel: %s-%s " % (parcel.product, parcel.version)
parcel.activate()
wait_for_parcel_stage(cluster, parcel, "ACTIVATED")
print "Activated parcel: %s-%s " % (parcel.product, parcel.version)
parcel_stage = "ACTIVATED"
print "Parcel %s-%s is activated" % (parcel.product, parcel.version)
def print_configs(entity_name, config_dict):
for attr, value in config_dict.iteritems():
print "Set %s config %s=\'%s\'" % (entity_name, attr, value)
def create_new_service(api, cluster, new_name, deps, scratch_dirs, master_host):
new_service = cluster.create_service(new_name, "IMPALA")
print "Created new service %s" % (new_name,)
service_config = {}
for d in deps:
service_config[d.type.lower() + "_service"] = d.name
service_config["impala_service_env_safety_valve"] = "IMPALA_KUDU=1"
new_service.update_config(service_config)
print_configs("service " + new_name, service_config)
for rcg in new_service.get_all_role_config_groups():
if rcg.roleType == "IMPALAD":
scratch_dirs_dict = { "scratch_dirs" : scratch_dirs }
rcg.update_config(scratch_dirs_dict)
print_configs("rcg " + rcg.displayName, scratch_dirs_dict)
for h in cluster.list_hosts():
if h.hostId == master_host.hostId:
continue
# This formula is embedded within CM. If we don't strictly
# adhere to it, we can't use any %s-%s-%s naming scheme.
md5 = hashlib.md5()
md5.update(h.hostId)
new_role_name = "%s-%s-%s" % (new_name, rcg.roleType, md5.hexdigest())
new_service.create_role(new_role_name, rcg.roleType, h.hostId)
print "Created new role %s" % (new_role_name,)
elif rcg.roleType == "LLAMA":
continue
else:
md5 = hashlib.md5()
md5.update(master_host.hostId)
new_role_name = "%s-%s-%s" % (new_name, rcg.roleType, md5.hexdigest())
new_service.create_role(new_role_name, rcg.roleType, master_host.hostId)
print "Created new role %s" % (new_role_name,)
def transform_path(rcg_name, rcg_config_dict, rcg_config_name):
# XXX: Do a better job with paths where the role type is embedded.
#
# e.g. /var/log/impalad/lineage --> /var/log/impalad2/lineage
val = rcg_config_dict.get(rcg_config_name, None)
if not val:
raise Exception("Could not get %s config for rcg %s" %
(rcg_config_name, rcg_name,))
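# Note (added for clarity, based on reading the regex below): the substitution
# appends a '2' to the last path component, e.g. '/var/log/impalad' becomes
# '/var/log/impalad2' and '/var/log/impalad/' becomes '/var/log/impalad2/'.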
new_val = re.sub(r"/(.*?)(/?)$", r"/\g<1>2\g<2>", val)
return {rcg_config_name : new_val}
def transform_port(rcg_name, rcg_config_dict, rcg_config_name):
# XXX: Actually resolve all port conflicts.
val = rcg_config_dict.get(rcg_config_name, None)
if not val:
raise Exception("Could not get %s config for rcg %s" %
(rcg_config_name, rcg_name,))
try:
val_int = int(val)
except ValueError, e:
raise Exception("Could not convert %s config (%s) for rcg %s into integer" %
(rcg_config_name, val, rcg_name))
new_val = str(val_int + 7)
return {rcg_config_name : new_val}
def transform_rcg_config(rcg):
summary = rcg.get_config()
full = {}
for name, config in rcg.get_config("full").iteritems():
full[name] = config.value if config.value else config.default
new_config = summary
if rcg.roleType == "IMPALAD":
new_config.update(transform_path(rcg.name, full, "audit_event_log_dir"))
new_config.update(transform_path(rcg.name, full, "lineage_event_log_dir"))
new_config.update(transform_path(rcg.name, full, "log_dir"))
new_config.update(transform_path(rcg.name, full, "scratch_dirs"))
new_config.update(transform_port(rcg.name, full, "be_port"))
new_config.update(transform_port(rcg.name, full, "beeswax_port"))
new_config.update(transform_port(rcg.name, full, "hs2_port"))
new_config.update(transform_port(rcg.name, full, "impalad_webserver_port"))
new_config.update(transform_port(rcg.name, full, "llama_callback_port"))
new_config.update(transform_port(rcg.name, full, "state_store_subscriber_port"))
elif rcg.roleType == "CATALOGSERVER":
new_config.update(transform_path(rcg.name, full, "log_dir"))
new_config.update(transform_port(rcg.name, full, "catalog_service_port"))
new_config.update(transform_port(rcg.name, full, "catalogserver_webserver_port"))
elif rcg.roleType == "LLAMA":
new_config.update(transform_path(rcg.name, full, "llama_log_dir"))
new_config.update(transform_port(rcg.name, full, "llama_am_server_thrift_admin_address"))
new_config.update(transform_port(rcg.name, full, "llama_http_port"))
new_config.update(transform_port(rcg.name, full, "llama_port"))
elif rcg.roleType == "STATESTORE":
new_config.update(transform_path(rcg.name, full, "log_dir"))
new_config.update(transform_port(rcg.name, full, "state_store_port"))
new_config.update(transform_port(rcg.name, full, "statestore_webserver_port"))
return new_config
def clone_existing_service(cluster, new_name, template_service):
new_service = cluster.create_service(new_name, "IMPALA")
print "Created new service %s" % (new_name,)
service_config, _ = template_service.get_config()
service_config["impala_service_env_safety_valve"] = "IMPALA_KUDU=1"
new_service.update_config(service_config)
print_configs("service " + new_name, service_config)
saved_special_port = None
i = 0
for old_rcg in template_service.get_all_role_config_groups():
if old_rcg.name != ("%s-%s-BASE" % (template_service.name, old_rcg.roleType)):
new_rcg_name = "%s-%s-%d" % (new_name, old_rcg.roleType, i)
i += 1
new_rcg = new_service.create_role_config_group(new_rcg_name,
new_rcg_name,
old_rcg.roleType)
print "Created new rcg %s" % (new_rcg_name,)
else:
new_rcg = new_service.get_role_config_group("%s-%s-BASE" % (new_name,
old_rcg.roleType))
new_rcg_config = transform_rcg_config(old_rcg)
new_rcg.update_config(new_rcg_config)
print_configs("rcg " + new_rcg.displayName, new_rcg_config)
special_port = new_rcg_config.get("state_store_subscriber_port", None)
if special_port:
saved_special_port = special_port
new_role_names = []
for old_role in old_rcg.get_all_roles():
md5 = hashlib.md5()
md5.update(old_role.hostRef.hostId)
new_role_name = "%s-%s-%s" % (new_name, new_rcg.roleType, md5.hexdigest())
new_role = new_service.create_role(new_role_name,
new_rcg.roleType,
old_role.hostRef.hostId)
print "Created new role %s" % (new_role_name,)
new_role_names.append(new_role.name)
new_rcg.move_roles(new_role_names)
for new_rcg in new_service.get_all_role_config_groups():
if new_rcg.roleType == "CATALOGSERVER":
special_port_config_dict = { "catalogd_cmd_args_safety_valve" :
"-state_store_subscriber_port=%s" % (saved_special_port,) }
new_rcg.update_config(special_port_config_dict)
print_configs("rcg " + new_rcg.displayName, special_port_config_dict)
def main():
args = parse_args()
api = ApiResource(args.host, username=args.user, password=args.password)
cluster = find_cluster(api, args.cluster)
check_new_service_does_not_exist(api, cluster, args.service_name)
if args.subparsers_name == "clone":
template_service = find_template_service(api, cluster, args.based_on)
else:
master_host = find_master_host(api, args.host, args.master_host)
deps = find_dependencies(args, cluster)
parcel = get_best_parcel(api, cluster)
if not parcel:
ensure_parcel_repo_added(api)
for attempt in xrange(1, MAX_PARCEL_REPO_WAIT_SECS + 1):
parcel = get_best_parcel(api, cluster)
if parcel:
break
print "Could not find parcel in attempt %d, will sleep and retry" % (attempt,)
time.sleep(1)
else:
raise Exception("No parcel showed up in %d seconds" % (MAX_PARCEL_REPO_WAIT_SECS,))
ensure_parcel_activated(cluster, parcel)
if args.subparsers_name == "create":
create_new_service(api, cluster, args.service_name, deps, args.scratch_dirs, master_host)
else:
clone_existing_service(cluster, args.service_name, template_service)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python
import sys
import itertools
import getopt
import copy
from fractions import gcd
from math import exp,log,sqrt,ceil,factorial
from time import time, gmtime, strftime
# Don't print debug info by default
debug = 0
# Overall reduction ratio
ratio_m = 60
ratio_n = 1
# Number of gear sets
stages = 2
# Upper limit for m+n
mn_max = 200
# Minimum gear count
n_min = 7
# Include reversal of all coprime pairs in search
reverse = 0
def usage():
print "-h,--help: This help information"
print "-d,--debug: Print additional information"
print "-r,--ratio: overall ratio:1 reduction ratio"
print "-s,--stages: Number of stages"
print "-m,--mn_max: Maximum m+n limit"
print "-n,--n_min: Minimum n value"
print "-v,--reverse: Include reverse ratios"
try:
opts, args = getopt.getopt(sys.argv[1:], "hdr:s:m:n:v", ["help", "debug", "ratio=", "stages=", "mn_max=", "n_min=", "reverse"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt,arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-d", "--debug"):
debug = 1
elif opt in ("-r", "--ratio"):
if arg.find(':') != -1:
a, b = arg.split(':')
ratio_m = int(a)
ratio_n = int(b)
else:
ratio_m = int(arg)
ratio_n = 1
elif opt in ("-s", "--stages"):
stages = int(arg)
elif opt in ("-m", "--mn_max"):
mn_max = int(arg)
elif opt in ("-n", "--n_min"):
n_min = int(arg)
elif opt in ("-v", "--reverse"):
reverse = 1
else:
print "Unrecognized argument: "+opt
usage()
sys.exit()
def combinations_x(iterable, r):
if len(iterable) == r:
yield(iterable)
elif r == 1:
for i in iterable:
yield( i )
else:
for i in range(r-1, len(iterable)):
for j in combinations_x( range( i ), r-1 ):
if type(j) is int:
l = [j]
else:
l = list(j)
l.append(i)
yield( tuple( iterable[k] for k in l ) )
# Derived constants
r_target = exp(log(1.0*ratio_m/ratio_n)/stages)
if debug:
print "# r_target="+'{0:.6f}'.format(r_target)
# Normalize ratio
while gcd(ratio_m,ratio_n) > 1:
g = gcd(ratio_m,ratio_n)
ratio_m /= g
ratio_n /= g
# Input sanity checks
if 1.0*(mn_max - n_min) / n_min < r_target:
print "Maximum possible ratio: "+str(mn_max - n_min)+':'+str(n_min)+" is less than target ratio "+str(r_target)
sys.exit(2)
# Find prime factors of ratio_m and ratio_n
# Create a list of primes that could be factors of ratio_m or ratio_n
plimit = max(sqrt(ratio_m),sqrt(ratio_n))
p_list = [2]
for p in range(2,int(ceil(plimit))):
prime = 1
for q in p_list:
if p % q == 0:
prime = 0
break
if prime:
p_list.append(p)
m_factors = []
n_factors = []
mt = ratio_m
nt = ratio_n
for p in p_list:
while mt % p == 0:
mt /= p
m_factors.append(p)
while nt % p == 0:
nt /= p
n_factors.append(p)
if mt > 1: m_factors.append(mt)
if nt > 1: n_factors.append(nt)
if len(m_factors) == 0: m_factors.append(1)
if len(n_factors) == 0: n_factors.append(1)
if debug:
print "# m="+str(ratio_m)+": "+str(m_factors)
print "# n="+str(ratio_n)+": "+str(n_factors)
# Make sure largest prime can be implemented
if max(m_factors+n_factors) > mn_max - n_min:
print "Unable to implement largest prime factor: "+str(max(m_factors+n_factors))
sys.exit(2)
# Construct a list of coprime pairs that are m+n <= mn_max
q = [[2,1], [3,1]]
q1 = []
c_list = []
while len(q) + len(q1) > 0:
if len(q) == 0:
q = q1[:]
q1 = []
m,n = q.pop(0)
if m+n > mn_max: continue
c_list.append([m,n])
q1.append([2*m-n,m])
q1.append([2*m+n,m])
q1.append([m+2*n,n])
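# Note (added for clarity): the three branches above walk a ternary tree that,
# starting from (2,1) and (3,1), is commonly used to enumerate coprime pairs
# with m > n >= 1. For example, (2,1) yields the children (3,2), (5,2) and
# (4,1); the m+n > mn_max check prunes the tree.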
# Create a list that satisfies both the n_min condition,
# and is ranked by error from r_target
d_list = []
for i in c_list:
if i[1] >= n_min:
d_list.append([max(r_target/(1.0*i[0]/i[1]),(1.0*i[0]/i[1])/r_target), i])
if debug:
print "# testing "+str(len(d_list))+" out of "+str(len(c_list))+" coprimes"
e_list = []
for i in sorted(d_list):
e_list.append(i[1])
if (reverse):
e_list.append([i[1][1], i[1][0]])
# print CSV header
print "# "+" ".join(sys.argv)
print "r_dev, m_dev,",
for i in range(stages):
if i == stages - 1:
print "m"+str(i+1)+", n"+str(i+1)
else:
print "m"+str(i+1)+", n"+str(i+1)+",",
results = []
count = 0
final_count = factorial(len(e_list))/(factorial(stages)*factorial(len(e_list)-stages))
if debug:
print "# Testing "+str(final_count)+" combinations"
start_time = int(time())
target_time = start_time + 60
for i in combinations_x(e_list,stages):
count += 1
if debug:
cur_time = int(time())
if cur_time >= target_time:
completion_time = 1.0*(cur_time - start_time)/count*final_count - (cur_time - start_time)
r_h = int(completion_time / 3600)
r_m = int((completion_time - r_h * 3600) / 60)
r_s = int(completion_time - r_h * 3600 - r_m * 60)
target_time = cur_time + 60
print "# "+str(100.0*count/final_count)+"% "+'{:02d}'.format(r_h)+':'+'{:02d}'.format(r_m)+':'+'{:02d}'.format(r_s)+' remaining'
if 1:
# Older GCD reduction from total products
mt = 1
nt = 1
ratios = []
for p in i:
ratios.append(p)
mt *= p[0]
nt *= p[1]
while gcd(mt,nt) > 1:
g = gcd(mt,nt)
mt /= g
nt /= g
else:
# Newer GCD reduction
ic = copy.deepcopy(i)
for j in range(stages):
for k in range(stages):
if j == k: continue
while gcd(ic[j][0],ic[k][1]) > 1:
g = gcd(ic[j][0],ic[k][1])
ic[j][0] /= g
ic[k][1] /= g
mt = 1
nt = 1
ratios = []
for q in i:
ratios.append(q)
for p in ic:
mt *= p[0]
nt *= p[1]
if mt == ratio_m and nt == ratio_n:
m_max = 0
m_min = mn_max
r_dev = 0
for q in ratios:
m = q[0] + q[1]
r = max(r_target/(1.0*q[0]/q[1]),(1.0*q[0]/q[1])/r_target)
if m < m_min: m_min = m
if m > m_max: m_max = m
if r > r_dev: r_dev = r
m_dev = m_max - m_min
print '{0:.6f},'.format(r_dev),
print str(m_dev)+',',
for j in range(len(ratios)):
if j == len(ratios) -1:
print str(ratios[j][0])+', '+str(ratios[j][1])
else:
print str(ratios[j][0])+', '+str(ratios[j][1])+',',
|
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains all the classes that handle the intermediate
representation language. It is basically the REIL language with minor
changes. Below there is an overview of the REIL language and its
instruction format. For full details see "REIL: A platform-independent
intermediate representation of disassembled code for static code
analysis."
All algorithms within the framework are designed to operate on the
intermediate representation. This provides great flexibility when it
comes to implement a cross-platform framework.
Instruction Format
------------------
mnemonic oprnd1, oprnd2, oprnd3
Instructions
------------
Arithmetic : ADD, SUB, MUL, DIV, MOD, BSH
Bitwise : AND, OR, XOR
Data Transfer : LDM, STM, STR
Conditional : BISZ, JCC
Other : UNDEF, UNKN, NOP
"""
# Display operand sizes in instructions
show_size = True
# TODO: Create module util and move this function there.
def split_address(address):
return address >> 0x08, address & 0xff
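# Example (illustrative): REIL addresses pack the original instruction address
# in the upper bits and a per-instruction index in the low byte, so
# split_address((0x08048000 << 8) | 0x02) returns (0x08048000, 0x02).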
class ReilMnemonic(object):
"""Enumeration of IR mnemonics.
"""
# Arithmetic Instructions
ADD = 1
SUB = 2
MUL = 3
DIV = 4
MOD = 5
BSH = 6
# Bitwise Instructions
AND = 7
OR = 8
XOR = 9
# Data Transfer Instructions
LDM = 10
STM = 11
STR = 12
# Conditional Instructions
BISZ = 13
JCC = 14
# Other Instructions
UNKN = 15
UNDEF = 16
NOP = 17
# Added Instructions
RET = 18
# Extensions
SEXT = 19
@staticmethod
def to_string(mnemonic):
"""Return the string representation of the given mnemonic.
"""
strings = {
# Arithmetic Instructions
ReilMnemonic.ADD : "add",
ReilMnemonic.SUB : "sub",
ReilMnemonic.MUL : "mul",
ReilMnemonic.DIV : "div",
ReilMnemonic.MOD : "mod",
ReilMnemonic.BSH : "bsh",
# Bitwise Instructions
ReilMnemonic.AND : "and",
ReilMnemonic.OR : "or",
ReilMnemonic.XOR : "xor",
# Data Transfer Instructions
ReilMnemonic.LDM : "ldm",
ReilMnemonic.STM : "stm",
ReilMnemonic.STR : "str",
# Conditional Instructions
ReilMnemonic.BISZ : "bisz",
ReilMnemonic.JCC : "jcc",
# Other Instructions
ReilMnemonic.UNKN : "unkn" ,
ReilMnemonic.UNDEF : "undef" ,
ReilMnemonic.NOP : "nop" ,
# Added Instructions
ReilMnemonic.RET : "ret",
# Extensions
ReilMnemonic.SEXT : "sext",
}
return strings[mnemonic]
@staticmethod
def from_string(string):
"""Return the mnemonic represented by the given string.
"""
mnemonics = {
# Arithmetic Instructions
"add" : ReilMnemonic.ADD,
"sub" : ReilMnemonic.SUB,
"mul" : ReilMnemonic.MUL,
"div" : ReilMnemonic.DIV,
"mod" : ReilMnemonic.MOD,
"bsh" : ReilMnemonic.BSH,
# Bitwise Instructions
"and" : ReilMnemonic.AND,
"or" : ReilMnemonic.OR,
"xor" : ReilMnemonic.XOR,
# Data Transfer Instructions
"ldm" : ReilMnemonic.LDM,
"stm" : ReilMnemonic.STM,
"str" : ReilMnemonic.STR,
# Conditional Instructions
"bisz" : ReilMnemonic.BISZ,
"jcc" : ReilMnemonic.JCC,
# Other Instructions
"unkn" : ReilMnemonic.UNKN,
"undef" : ReilMnemonic.UNDEF,
"nop" : ReilMnemonic.NOP,
# Added Instructions
"ret" : ReilMnemonic.RET,
# Extensions
"sext" : ReilMnemonic.SEXT,
}
return mnemonics[string]
REIL_MNEMONICS = (
# Arithmetic Instructions
ReilMnemonic.ADD,
ReilMnemonic.SUB,
ReilMnemonic.MUL,
ReilMnemonic.DIV,
ReilMnemonic.MOD,
ReilMnemonic.BSH,
# Bitwise Instructions
ReilMnemonic.AND,
ReilMnemonic.OR,
ReilMnemonic.XOR,
# Data Transfer Instructions
ReilMnemonic.LDM,
ReilMnemonic.STM,
ReilMnemonic.STR,
# Conditional Instructions
ReilMnemonic.BISZ,
ReilMnemonic.JCC,
# Other Instructions
ReilMnemonic.UNKN,
ReilMnemonic.UNDEF,
ReilMnemonic.NOP,
# Added Instructions
ReilMnemonic.RET,
# Extensions
ReilMnemonic.SEXT,
)
class ReilInstruction(object):
"""Representation of a REIL instruction.
"""
__slots__ = [
'_mnemonic',
'_operands',
'_comment',
'_address',
]
def __init__(self):
# A REIL mnemonic
self._mnemonic = None
# A list of operand. Exactly 3.
self._operands = [ReilEmptyOperand()] * 3
# Optionally, a comment for the instruction.
self._comment = None
# A REIL address for the instruction.
self._address = None
@property
def mnemonic(self):
"""Get instruction mnemonic.
"""
return self._mnemonic
@property
def mnemonic_str(self):
"""Get instruction mnemonic as string.
"""
return ReilMnemonic.to_string(self._mnemonic)
@mnemonic.setter
def mnemonic(self, value):
"""Set instruction mnemonic.
"""
if value not in REIL_MNEMONICS:
raise Exception("Invalid instruction mnemonic : %s" % str(value))
self._mnemonic = value
@property
def operands(self):
"""Get instruction operands.
"""
return self._operands
@operands.setter
def operands(self, value):
"""Set instruction operands.
"""
if len(value) != 3:
raise Exception("Invalid instruction operands : %s" % str(value))
self._operands = value
@property
def address(self):
"""Get instruction address.
"""
return self._address
@address.setter
def address(self, value):
"""Set instruction address.
"""
self._address = value
@property
def comment(self):
"""Get instruction comment.
"""
return self._comment
@comment.setter
def comment(self, value):
"""Set instruction comment.
"""
self._comment = value
def __str__(self):
def print_oprnd(oprnd):
oprnd_str = str(oprnd)
size_str = str(oprnd.size) if oprnd.size else ""
sizes = {
256 : "DDQWORD",
128 : "DQWORD",
72 : "POINTER",
64 : "QWORD",
40 : "POINTER",
32 : "DWORD",
16 : "WORD",
8 : "BYTE",
1 : "BIT",
"" : "UNK",
}
if isinstance(oprnd, ReilEmptyOperand):
return "%s" % (oprnd_str)
else:
return "%s %s" % (sizes[oprnd.size if oprnd.size else ""], oprnd_str)
mnemonic_str = ReilMnemonic.to_string(self._mnemonic)
if show_size:
operands_str = ", ".join(map(print_oprnd, self._operands))
else:
operands_str = ", ".join(map(str, self._operands))
return "%-5s [%s]" % (mnemonic_str, operands_str)
def __hash__(self):
return hash(str(self))
def __getstate__(self):
state = {}
state['_mnemonic'] = self._mnemonic
state['_operands'] = self._operands
state['_comment'] = self._comment
state['_address'] = self._address
return state
def __setstate__(self, state):
self._mnemonic = state['_mnemonic']
self._operands = state['_operands']
self._comment = state['_comment']
self._address = state['_address']
class ReilOperand(object):
"""Representation of an IR instruction's operand.
"""
__slots__ = [
'_size',
]
def __init__(self, size):
# Size of the operand, in bits.
self._size = size
@property
def size(self):
"""Get operand size.
"""
return self._size
@size.setter
def size(self, value):
"""Set operand size.
"""
self._size = value
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
state['_size'] = self._size
return state
def __setstate__(self, state):
self._size = state['_size']
class ReilImmediateOperand(ReilOperand):
"""Representation of a REIL instruction immediate operand.
"""
__slots__ = [
'_immediate',
]
def __init__(self, immediate, size=None):
super(ReilImmediateOperand, self).__init__(size)
assert type(immediate) in [int, long], "Invalid immediate value type."
self._immediate = immediate
@property
def immediate(self):
"""Get immediate.
"""
if not self._size:
raise Exception("Operand size missing.")
return self._immediate & 2**self._size-1
def __str__(self):
if not self._size:
raise Exception("Operand size missing.")
string = hex(self._immediate & 2**self._size-1)
return string[:-1] if string[-1] == 'L' else string
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size and \
self._immediate == other._immediate
def __getstate__(self):
state = super(ReilImmediateOperand, self).__getstate__()
state['_immediate'] = self._immediate
return state
def __setstate__(self, state):
super(ReilImmediateOperand, self).__setstate__(state)
self._immediate = state['_immediate']
class ReilRegisterOperand(ReilOperand):
"""Representation of a REIL instruction register operand.
"""
__slots__ = [
'_name',
]
def __init__(self, name, size=None):
super(ReilRegisterOperand, self).__init__(size)
# Register name.
self._name = name
@property
def name(self):
"""Get IR register operand name.
"""
return self._name
def __str__(self):
return self._name
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size and \
self._name == other._name
def __getstate__(self):
state = super(ReilRegisterOperand, self).__getstate__()
state['_name'] = self._name
return state
def __setstate__(self, state):
super(ReilRegisterOperand, self).__setstate__(state)
self._name = state['_name']
class ReilEmptyOperand(ReilRegisterOperand):
"""Representation of an IR instruction's empty operand.
"""
def __init__(self):
super(ReilEmptyOperand, self).__init__("EMPTY", size=None)
class ReilInstructionBuilder(object):
"""REIL Instruction Builder. Generate REIL instructions, easily.
"""
# Arithmetic Instructions
# ======================================================================== #
def gen_add(self, src1, src2, dst):
"""Return an ADD instruction.
"""
return self.build(ReilMnemonic.ADD, src1, src2, dst)
def gen_sub(self, src1, src2, dst):
"""Return a SUB instruction.
"""
return self.build(ReilMnemonic.SUB, src1, src2, dst)
def gen_mul(self, src1, src2, dst):
"""Return a MUL instruction.
"""
return self.build(ReilMnemonic.MUL, src1, src2, dst)
def gen_div(self, src1, src2, dst):
"""Return a DIV instruction.
"""
return self.build(ReilMnemonic.DIV, src1, src2, dst)
def gen_mod(self, src1, src2, dst):
"""Return a MOD instruction.
"""
return self.build(ReilMnemonic.MOD, src1, src2, dst)
def gen_bsh(self, src1, src2, dst):
"""Return a BSH instruction.
"""
return self.build(ReilMnemonic.BSH, src1, src2, dst)
# Bitwise Instructions
# ======================================================================== #
def gen_and(self, src1, src2, dst):
"""Return an AND instruction.
"""
return self.build(ReilMnemonic.AND, src1, src2, dst)
def gen_or(self, src1, src2, dst):
"""Return an OR instruction.
"""
return self.build(ReilMnemonic.OR, src1, src2, dst)
def gen_xor(self, src1, src2, dst):
"""Return a XOR instruction.
"""
return self.build(ReilMnemonic.XOR, src1, src2, dst)
# Data Transfer Instructions
# ======================================================================== #
def gen_ldm(self, src, dst):
"""Return a LDM instruction.
"""
return self.build(ReilMnemonic.LDM, src, ReilEmptyOperand(), dst)
def gen_stm(self, src, dst):
"""Return a STM instruction.
"""
return self.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst)
def gen_str(self, src, dst):
"""Return a STR instruction.
"""
return self.build(ReilMnemonic.STR, src, ReilEmptyOperand(), dst)
# Conditional Instructions
# ======================================================================== #
def gen_bisz(self, src, dst):
"""Return a BISZ instruction.
"""
return self.build(ReilMnemonic.BISZ, src, ReilEmptyOperand(), dst)
def gen_jcc(self, src, dst):
"""Return a JCC instruction.
"""
return self.build(ReilMnemonic.JCC, src, ReilEmptyOperand(), dst)
# Other Instructions
# ======================================================================== #
def gen_unkn(self):
"""Return an UNKN instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNKN, empty_reg, empty_reg, empty_reg)
def gen_undef(self):
"""Return an UNDEF instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNDEF, empty_reg, empty_reg, empty_reg)
def gen_nop(self):
"""Return a NOP instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.NOP, empty_reg, empty_reg, empty_reg)
# Ad hoc Instructions
# ======================================================================== #
def gen_ret(self):
"""Return a RET instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.RET, empty_reg, empty_reg, empty_reg)
# Extensions
# ======================================================================== #
def gen_sext(self, src, dst):
"""Return a SEXT instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.SEXT, src, empty_reg, dst)
# Auxiliary functions
# ======================================================================== #
def build(self, mnemonic, oprnd1, oprnd2, oprnd3):
"""Return the specified instruction.
"""
ins = ReilInstruction()
ins.mnemonic = mnemonic
ins.operands = [oprnd1, oprnd2, oprnd3]
return ins
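# Illustrative usage of the builder (a sketch; the 32-bit operand names below
# are made up for the example, not taken from the module):
#
#   builder = ReilInstructionBuilder()
#   eax = ReilRegisterOperand("eax", 32)
#   imm = ReilImmediateOperand(4, 32)
#   dst = ReilRegisterOperand("t0", 32)
#   instr = builder.gen_add(eax, imm, dst)
#   str(instr)  # -> "add   [DWORD eax, DWORD 0x4, DWORD t0]"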
class DualInstruction(object):
"""Represents an assembler instruction paired with its IR
representation.
"""
__slots__ = [
'_address',
'_asm_instr',
'_ir_instrs',
]
def __init__(self, address, asm_instr, ir_instrs):
# Address of the assembler instruction.
self._address = address
# Assembler instruction.
self._asm_instr = asm_instr
# REIL translation of the assembler instruction. Note that one
# assembler instruction is mapped to more than one REIL
# instruction.
self._ir_instrs = ir_instrs
@property
def address(self):
"""Get instruction address.
"""
return self._address
@property
def asm_instr(self):
"""Get assembly instruction.
"""
return self._asm_instr
@property
def ir_instrs(self):
"""Get IR representation of the assembly instruction.
"""
return self._ir_instrs
def __eq__(self, other):
return self.address == other.address and \
self.asm_instr == other.asm_instr
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
state['_address'] = self._address
state['_asm_instr'] = self._asm_instr
state['_ir_instrs'] = self._ir_instrs
return state
def __setstate__(self, state):
self._address = state['_address']
self._asm_instr = state['_asm_instr']
self._ir_instrs = state['_ir_instrs']
class ReilSequence(object):
"""Reil instruction sequence.
"""
def __init__(self):
self.__sequence = []
self.__next_seq_address = None
def append(self, instruction):
self.__sequence.append(instruction)
def get(self, index):
return self.__sequence[index]
def dump(self):
for instr in self.__sequence:
base_addr, index = split_address(instr.address)
print("{:08x}:{:02x}\t{}".format(base_addr, index, instr))
@property
def address(self):
return self.__sequence[0].address if self.__sequence else None
@property
def next_sequence_address(self):
return self.__next_seq_address
@next_sequence_address.setter
def next_sequence_address(self, address):
self.__next_seq_address = address
def __len__(self):
return len(self.__sequence)
def __iter__(self):
for instr in self.__sequence:
yield instr
class ReilContainerInvalidAddressError(Exception):
pass
class ReilContainer(object):
"""Reil instruction container.
"""
def __init__(self):
self.__container = {}
def add(self, sequence):
base_addr, _ = split_address(sequence.address)
if base_addr in self.__container.keys():
raise Exception("Invalid sequence")
self.__container[base_addr] = sequence
def fetch(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise ReilContainerInvalidAddressError()
return self.__container[base_addr].get(index)
def get_next_address(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise Exception("Invalid address.")
addr = address
if index < len(self.__container[base_addr]) - 1:
addr += 1
else:
addr = self.__container[base_addr].next_sequence_address
return addr
def dump(self):
for base_addr in sorted(self.__container.keys()):
self.__container[base_addr].dump()
print("-" * 80)
def __iter__(self):
for addr in sorted(self.__container.keys()):
for instr in self.__container[addr]:
yield instr
|
|
"""
UNIT 3: Functions and APIs: Polynomials
A polynomial is a mathematical formula like:
30 * x**2 + 20 * x + 10
More formally, it involves a single variable (here 'x'), and the sum of one
or more terms, where each term is a real number multiplied by the variable
raised to a non-negative integer power. (Remember that x**0 is 1 and x**1 is x,
so 'x' is short for '1 * x**1' and '10' is short for '10 * x**0'.)
We will represent a polynomial as a Python function which computes the formula
when applied to a numeric value x. The function will be created with the call:
p1 = poly((10, 20, 30))
where the nth element of the input tuple is the coefficient of the nth power of x.
(Note the order of coefficients has the x**n coefficient neatly in position n of
the list, but this is the reversed order from how we usually write polynomials.)
poly returns a function, so we can now apply p1 to some value of x:
p1(0) == 10
Our representation of a polynomial is as a callable function, but in addition,
we will store the coefficients in the .coefs attribute of the function, so we have:
p1.coefs == (10, 20, 30)
And finally, the name of the function will be the formula given above, so you should
have something like this:
>>> p1
<function 30 * x**2 + 20 * x + 10 at 0x100d71c08>
>>> p1.__name__
'30 * x**2 + 20 * x + 10'
Make sure the formula used for function names is simplified properly.
No '0 * x**n' terms; just drop these. Simplify '1 * x**n' to 'x**n'.
Simplify '5 * x**0' to '5'. Similarly, simplify 'x**1' to 'x'.
For negative coefficients, like -5, you can use '... + -5 * ...' or
'... - 5 * ...'; your choice. I'd recommend no spaces around '**'
and spaces around '+' and '*', but you are free to use your preferences.
Your task is to write the function poly and the following additional functions:
is_poly, add, sub, mul, power, deriv, integral
They are described below; see the test_poly function for examples.
"""
from collections import defaultdict
def break_poly(coefs):
fn = ''
for i in range(len(coefs)-1,-1,-1):
if coefs[i] == 0:
fn += ''
elif coefs[i] == 1:
if i == 0:
fn += '1 + '
elif i == 1:
fn += 'x + '
else:
fn += 'x**%s + ' % (i)
elif i == 0:
fn += '%s + ' % (coefs[i])
elif i == 1:
fn += '%s * x + ' % (coefs[i])
else:
fn += '%s * x**%s + ' % (coefs[i], i)
fn = fn[:-3]
return fn
def poly(coefs):
"""Return a function that represents the polynomial with these coefficients.
For example, if coefs=(10, 20, 30), return the function of x that computes
'30 * x**2 + 20 * x + 10'. Also store the coefs on the .coefs attribute of
the function, and the str of the formula on the .__name__ attribute.'"""
# your code here (I won't repeat "your code here"; there's one for each function)
polynomial = break_poly(coefs)
def rez(x):
return eval(polynomial)
rez.coefs = coefs
rez.__name__ = polynomial
return rez
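# Example (illustrative): p = poly((10, 20, 30)) builds the polynomial
# 30 * x**2 + 20 * x + 10, so p(2) == 170, p.coefs == (10, 20, 30) and
# p.__name__ == '30 * x**2 + 20 * x + 10'.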
def Poly(coefs):
def rez(x):
return eval(coefs)
rez.coefs = (0,0,0)
rez.__name__ = coefs
return rez
def test_poly():
global p1, p2, p3, p4, p5, p9 # global to ease debugging in an interactive session
p1 = poly((10, 20, 30))
assert p1(0) == 10
for x in (1, 2, 3, 4, 5, 1234.5):
assert p1(x) == 30 * x**2 + 20 * x + 10
assert same_name(p1.__name__, '30 * x**2 + 20 * x + 10')
assert is_poly(p1)
assert not is_poly(abs) and not is_poly(42) and not is_poly('cracker')
p3 = poly((0, 0, 0, 1))
assert p3.__name__ == 'x**3'
p9 = mul(p3, mul(p3, p3))
assert p9(2) == 512
p4 = add(p1, p3)
assert same_name(p4.__name__, 'x**3 + 30 * x**2 + 20 * x + 10')
assert same_name(poly((1, 1)).__name__, 'x + 1')
assert same_name(power(poly((1, 1)), 10).__name__,
'x**10 + 10 * x**9 + 45 * x**8 + 120 * x**7 + 210 * x**6 + 252 * x**5 + 210' +
' * x**4 + 120 * x**3 + 45 * x**2 + 10 * x + 1')
assert add(poly((10, 20, 30)), poly((1, 2, 3))).coefs == (11,22,33)
assert sub(poly((10, 20, 30)), poly((1, 2, 3))).coefs == (9,18,27)
assert mul(poly((10, 20, 30)), poly((1, 2, 3))).coefs == (10, 40, 100, 120, 90)
assert power(poly((1, 1)), 2).coefs == (1, 2, 1)
assert power(poly((1, 1)), 10).coefs == (1, 10, 45, 120, 210, 252, 210, 120, 45, 10, 1)
assert deriv(p1).coefs == (20, 60)
assert integral(poly((20, 60))).coefs == (0, 20, 30)
p5 = poly((0, 1, 2, 3, 4, 5))
assert same_name(p5.__name__, '5 * x**5 + 4 * x**4 + 3 * x**3 + 2 * x**2 + x')
assert p5(1) == 15
assert p5(2) == 258
assert same_name(deriv(p5).__name__, '25 * x**4 + 16 * x**3 + 9 * x**2 + 4 * x + 1')
assert deriv(p5)(1) == 55
assert deriv(p5)(2) == 573
def same_name(name1, name2):
"""I define this function rather than doing name1 == name2 to allow for some
variation in naming conventions."""
def canonical_name(name): return name.replace(' ', '').replace('+-', '-')
return canonical_name(name1) == canonical_name(name2)
def is_poly(x):
"Return true if x is a poly (polynomial)."
## For examples, see the test_poly function
poly = True
try:
x.coefs
except AttributeError:
poly = False
return poly
def add(p1, p2):
"Return a new polynomial which is the sum of polynomials p1 and p2."
coefs = []
hack = len(p1.coefs) - len(p2.coefs)
if hack < 0:
np1_coefs = p1.coefs + (0,)*abs(hack)
np2_coefs = p2.coefs
elif hack > 0:
np2_coefs = p2.coefs + (0,)*hack
np1_coefs = p1.coefs
else:
np1_coefs = p1.coefs
np2_coefs = p2.coefs
for i in range(len(np1_coefs)):
coefs.append(np1_coefs[i] + np2_coefs[i])
return poly(tuple(coefs))
def sub(p1, p2):
"Return a new polynomial which is the difference of polynomials p1 and p2."
coefs = []
hack = len(p1.coefs) - len(p2.coefs)
if hack < 0:
np1_coefs = p1.coefs + (0,)*abs(hack)
np2_coefs = p2.coefs
elif hack > 0:
np2_coefs = p2.coefs + (0,)*hack
np1_coefs = p1.coefs
else:
np1_coefs = p1.coefs
np2_coefs = p2.coefs
for i in range(len(np1_coefs)):
coefs.append(np1_coefs[i] - np2_coefs[i])
return poly(tuple(coefs))
def mul(p1, p2):
"Return a new polynomial which is the product of polynomials p1 and p2."
d = defaultdict(lambda:0)
for c1 in range(len(p1.coefs)):
for c2 in range(len(p2.coefs)):
d[c1+c2] += p1.coefs[c1] * p2.coefs[c2]
return poly(tuple(d[i] for i in sorted(d)))
def power(p, n):
"Return a new polynomial which is p to the nth power (n a non-negative integer)."
polynom = p
for _ in range(n-1):
polynom = mul(polynom, p)
return polynom
# power(poly((1,2,3)), 3)
"""
If your calculus is rusty (or non-existent), here is a refresher:
The derivative of a polynomial term (c * x**n) is (c*n * x**(n-1)).
The derivative of a sum is the sum of the derivatives.
So the derivative of (30 * x**2 + 20 * x + 10) is (60 * x + 20).
The integral is the anti-derivative:
The integral of 60 * x + 20 is 30 * x**2 + 20 * x + C, for any constant C.
Any value of C is an equally good anti-derivative. We allow C as an argument
to the function integral (with default C=0).
"""
def deriv(p):
"Return the derivative of a function p (with respect to its argument)."
coefs = []
inv = p.coefs
for el in range(1,len(p.coefs)):
coefs.append(inv[el]*el)
return poly(tuple(coefs))
def integral(p, C=0):
"Return the integral of a function p (with respect to its argument)."
coefs = ()
inv = (C,) + p.coefs
for el in range(len(inv)):
if el != 0:
coefs += (inv[el]/el,)
else:
coefs += (C,)
return poly(coefs)
"""
Now for an extra credit challenge: arrange to describe polynomials with an
expression like '3 * x**2 + 5 * x + 9' rather than (9, 5, 3). You can do this
in one (or both) of two ways:
(1) By defining poly as a class rather than a function, and overloading the
__add__, __sub__, __mul__, and __pow__ operators, etc. If you choose this,
call the function test_poly1(). Make sure that poly objects can still be called.
(2) Using the grammar parsing techniques we learned in Unit 5. For this
approach, define a new function, Poly, which takes one argument, a string,
as in Poly('30 * x**2 + 20 * x + 10'). Call test_poly2().
"""
def test_poly1():
# I define x as the polynomial 1*x + 0.
x = poly((0, 1))
# From here on I can create polynomials by + and * operations on x.
newp1 = 30 * x**2 + 20 * x + 10 # This is a poly object, not a number!
assert p1(100) == newp1(100) # The new poly objects are still callable.
assert same_name(p1.__name__,newp1.__name__)
assert (x + 1) * (x - 1) == x**2 - 1 == poly((-1, 0, 1))
def test_poly2():
newp1 = Poly('30 * x**2 + 20 * x + 10')
assert p1(100) == newp1(100)
assert same_name(p1.__name__,newp1.__name__)
test_poly()
test_poly2()
|
|
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "5.4.1"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
# extracting the archive
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
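# Sketch of plugging in a custom ``downloader_factory`` (hedged example; the
# factory name and log message are illustrative). Per the docstring above, the
# factory takes no arguments and returns a callable of the form download(url, target):
#
#     def logging_downloader_factory():
#         def download(url, target):
#             log.warn("Fetching %s -> %s", url, target)
#             download_file_insecure(url, target)
#         return download
#
#     archive = download_setuptools(downloader_factory=logging_downloader_factory)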
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main():
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
archive = download_setuptools(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
)
return _install(archive, _build_install_args(options))
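# Command-line sketch, per the module docstring and the options parsed in
# _parse_args() above (version and mirror URL are illustrative):
#
#     python ez_setup.py --user --version 5.4.1
#     python ez_setup.py --insecure --download-base https://mirror.example/setuptools/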
if __name__ == '__main__':
sys.exit(main())
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext, dirname, exists
from os import getenv
from distutils.spawn import find_executable
from distutils.version import LooseVersion
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS
from tools.hooks import hook_tool
from tools.utils import run_cmd, NotSupportedException
class GCC(mbedToolchain):
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
DIAGNOSTIC_PATTERN = re.compile(r'((?P<file>[^:]+):(?P<line>\d+):)(?P<col>\d+):? (?P<severity>warning|[eE]rror|fatal error): (?P<message>.+)')
GCC_RANGE = (LooseVersion("6.0.0"), LooseVersion("7.0.0"))
GCC_VERSION_RE = re.compile(rb"\d+\.\d+\.\d+")
def __init__(self, target, notify=None, macros=None, build_profile=None,
build_dir=None):
mbedToolchain.__init__(self, target, notify, macros,
build_profile=build_profile, build_dir=build_dir)
tool_path = TOOLCHAIN_PATHS['GCC_ARM']
# Add flags for current size setting
default_lib = "std"
if hasattr(target, "default_lib"):
default_lib = target.default_lib
elif hasattr(target, "default_build"): # Legacy
default_lib = target.default_build
if default_lib == "small":
self.flags["common"].append("-DMBED_RTOS_SINGLE_THREAD")
self.flags["ld"].append("--specs=nano.specs")
if target.core == "Cortex-M0+":
self.cpu = ["-mcpu=cortex-m0plus"]
elif target.core.startswith("Cortex-M4"):
self.cpu = ["-mcpu=cortex-m4"]
elif target.core.startswith("Cortex-M7"):
self.cpu = ["-mcpu=cortex-m7"]
elif target.core.startswith("Cortex-M23"):
self.cpu = ["-mcpu=cortex-m23"]
elif target.core.startswith("Cortex-M33F"):
self.cpu = ["-mcpu=cortex-m33"]
elif target.core.startswith("Cortex-M33"):
self.cpu = ["-march=armv8-m.main"]
else:
self.cpu = ["-mcpu={}".format(target.core.lower())]
if target.core.startswith("Cortex-M"):
self.cpu.append("-mthumb")
# FPU handling; Cortex-M7 parts may have a double-precision FPU
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7FD":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
if ((target.core.startswith("Cortex-M23") or
target.core.startswith("Cortex-M33")) and
not target.core.endswith("-NS")):
self.cpu.append("-mcmse")
self.flags["ld"].extend([
"-Wl,--cmse-implib",
"-Wl,--out-implib=%s" % join(build_dir, "cmse_lib.o")
])
elif target.core == "Cortex-M23-NS" or target.core == "Cortex-M33-NS":
self.flags["ld"].append("-DDOMAIN_NS=1")
self.flags["common"] += self.cpu
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc] + self.flags['asm'] + self.flags["common"]
self.cc = [main_cc]
self.cppc = [main_cppc]
self.cc += self.flags['c'] + self.flags['common']
self.cppc += self.flags['cxx'] + self.flags['common']
self.flags['ld'] += self.cpu
self.ld = [join(tool_path, "arm-none-eabi-gcc")] + self.flags['ld']
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc", "nosys"]
self.preproc = [join(tool_path, "arm-none-eabi-cpp"), "-E", "-P"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
self.use_distcc = (bool(getenv("DISTCC_POTENTIAL_HOSTS", False))
and not getenv("MBED_DISABLE_DISTCC", False))
def version_check(self):
stdout, _, retcode = run_cmd([self.cc[0], "--version"], redirect=True)
msg = None
match = self.GCC_VERSION_RE.search(stdout)
found_version = LooseVersion(match.group(0).decode('utf-8')) if match else None
min_ver, max_ver = self.GCC_RANGE
if found_version and (found_version < min_ver or found_version >= max_ver):
msg = ("Compiler version mismatch: Have {}; "
"expected version >= {} and < {}"
.format(found_version, min_ver, max_ver))
elif not match:
msg = ("Compiler version mismatch: Could not detect version; "
"expected version >= {} and < {}"
.format(min_ver, max_ver))
if msg:
self.notify.cc_info({
"message": msg,
"file": "",
"line": "",
"col": "",
"severity": "Warning",
})
def is_not_supported_error(self, output):
return "error: #error [NOT_SUPPORTED]" in output
def parse_output(self, output):
# The warning/error notification is multiline
msg = None
for line in output.splitlines():
match = self.DIAGNOSTIC_PATTERN.search(line)
if match is not None:
if msg is not None:
self.notify.cc_info(msg)
msg = None
msg = {
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'col': match.group('col'),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
}
if msg is not None:
self.notify.cc_info(msg)
def get_dep_option(self, object):
base, _ = splitext(object)
dep_path = base + '.d'
return ["-MD", "-MF", dep_path]
def get_config_option(self, config_header):
return ['-include', config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
if self.RESPONSE_FILES:
opts += ['@%s' % self.get_inc_file(includes)]
else:
opts += ["-I%s" % i for i in includes]
config_header = self.get_config_header()
if config_header is not None:
opts = opts + self.get_config_option(config_header)
return opts
@hook_tool
def assemble(self, source, object, includes):
# Build assemble command
cmd = self.asm + self.get_compile_options(self.get_symbols(True), includes) + ["-o", object, source]
# Call cmdline hook
cmd = self.hook.get_cmdline_assembler(cmd)
# Return command array, don't execute
return [cmd]
@hook_tool
def compile(self, cc, source, object, includes):
# Build compile command
cmd = cc + self.get_compile_options(self.get_symbols(), includes)
cmd.extend(self.get_dep_option(object))
cmd.extend(["-o", object, source])
# Call cmdline hook
cmd = self.hook.get_cmdline_compiler(cmd)
if self.use_distcc:
cmd = ["distcc"] + cmd
return [cmd]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
@hook_tool
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# Preprocess
if mem_map:
preproc_output = join(dirname(output), ".link_script.ld")
cmd = (self.preproc + [mem_map] + self.ld[1:] +
[ "-o", preproc_output])
self.notify.cc_verbose("Preproc: %s" % ' '.join(cmd))
self.default_cmd(cmd)
mem_map = preproc_output
# Build linker command
map_file = splitext(output)[0] + ".map"
cmd = self.ld + ["-o", output, "-Wl,-Map=%s" % map_file] + objects + ["-Wl,--start-group"] + libs + ["-Wl,--end-group"]
if mem_map:
cmd.extend(['-T', mem_map])
for L in lib_dirs:
cmd.extend(['-L', L])
cmd.extend(libs)
# Call cmdline hook
cmd = self.hook.get_cmdline_linker(cmd)
if self.RESPONSE_FILES:
# Split link command to linker executable + response file
cmd_linker = cmd[0]
link_files = self.get_link_file(cmd[1:])
cmd = [cmd_linker, "@%s" % link_files]
# Exec command
self.notify.cc_verbose("Link: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@hook_tool
def archive(self, objects, lib_path):
if self.RESPONSE_FILES:
param = ["@%s" % self.get_arch_file(objects)]
else:
param = objects
# Exec command
self.default_cmd([self.ar, 'rcs', lib_path] + param)
@hook_tool
def binary(self, resources, elf, bin):
# Build binary command
_, fmt = splitext(bin)
bin_arg = {'.bin': 'binary', '.hex': 'ihex'}[fmt]
cmd = [self.elf2bin, "-O", bin_arg, elf, bin]
# Call cmdline hook
cmd = self.hook.get_cmdline_binary(cmd)
# Exec command
self.notify.cc_verbose("FromELF: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@staticmethod
def name_mangle(name):
return "_Z%i%sv" % (len(name), name)
@staticmethod
def make_ld_define(name, value):
return "-D%s=%s" % (name, value)
@staticmethod
def redirect_symbol(source, sync, build_dir):
return "-Wl,--defsym=%s=%s" % (source, sync)
@staticmethod
def check_executable():
"""Returns True if the executable (arm-none-eabi-gcc) location
specified by the user exists OR the executable can be found on the PATH.
Returns False otherwise."""
if not TOOLCHAIN_PATHS['GCC_ARM'] or not exists(TOOLCHAIN_PATHS['GCC_ARM']):
if find_executable('arm-none-eabi-gcc'):
TOOLCHAIN_PATHS['GCC_ARM'] = ''
return True
else:
return False
else:
exec_name = join(TOOLCHAIN_PATHS['GCC_ARM'], 'arm-none-eabi-gcc')
return exists(exec_name) or exists(exec_name + '.exe')
class GCC_ARM(GCC):
pass
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'ArpEntryEnum' : _MetaInfoEnum('ArpEntryEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg',
{
'static':'STATIC',
'alias':'ALIAS',
}, 'Cisco-IOS-XR-ipv4-arp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg']),
'ArpEncapEnum' : _MetaInfoEnum('ArpEncapEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg',
{
'arpa':'ARPA',
'srp':'SRP',
'srpa':'SRPA',
'srpb':'SRPB',
}, 'Cisco-IOS-XR-ipv4-arp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg']),
'Arp' : {
'meta_info' : _MetaInfoClass('Arp',
False,
[
_MetaInfoClassMember('inner-cos', ATTRIBUTE, 'int' , None, None,
[(0, 7)], [],
''' Configure inner cos values for arp packets
''',
'inner_cos',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('outer-cos', ATTRIBUTE, 'int' , None, None,
[(0, 7)], [],
''' Configure outer cos values for arp packets
''',
'outer_cos',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'arp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'Arpgmp.Vrf.Entries.Entry' : {
'meta_info' : _MetaInfoClass('Arpgmp.Vrf.Entries.Entry',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP Address
''',
'address',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
_MetaInfoClassMember('encapsulation', REFERENCE_ENUM_CLASS, 'ArpEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpEncapEnum',
[], [],
''' Encapsulation type
''',
'encapsulation',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('entry-type', REFERENCE_ENUM_CLASS, 'ArpEntryEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpEntryEnum',
[], [],
''' Entry type
''',
'entry_type',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' MAC Address
''',
'mac_address',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'entry',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'Arpgmp.Vrf.Entries' : {
'meta_info' : _MetaInfoClass('Arpgmp.Vrf.Entries',
False,
[
_MetaInfoClassMember('entry', REFERENCE_LIST, 'Entry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'Arpgmp.Vrf.Entries.Entry',
[], [],
''' ARP static and alias entry configuration item
''',
'entry',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'entries',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'Arpgmp.Vrf' : {
'meta_info' : _MetaInfoClass('Arpgmp.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
_MetaInfoClassMember('entries', REFERENCE_CLASS, 'Entries' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'Arpgmp.Vrf.Entries',
[], [],
''' ARP static and alias entry configuration
''',
'entries',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'Arpgmp' : {
'meta_info' : _MetaInfoClass('Arpgmp',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'Arpgmp.Vrf',
[], [],
''' Per VRF configuration, for the default VRF use
'default'
''',
'vrf',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'arpgmp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group.Peers.Peer' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group.Peers.Peer',
False,
[
_MetaInfoClassMember('prefix-string', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Neighbor IPv4 address
''',
'prefix_string',
'Cisco-IOS-XR-ipv4-arp-cfg', True, [
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IPv4 address
''',
'prefix_string',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
_MetaInfoClassMember('prefix-string', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IPv4 address
''',
'prefix_string',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
]),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'peer',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group.Peers' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group.Peers',
False,
[
_MetaInfoClassMember('peer', REFERENCE_LIST, 'Peer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group.Peers.Peer',
[], [],
''' None
''',
'peer',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'peers',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
_MetaInfoClassMember('interface-id', ATTRIBUTE, 'int' , None, None,
[(1, 65535)], [],
''' Interface Id for the interface
''',
'interface_id',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces.Interface',
[], [],
''' Interface for this Group
''',
'interface',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group.InterfaceList' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group.InterfaceList',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable List of Interfaces for this Group.
Deletion of this object also causes deletion
of all associated objects under
InterfaceList.
''',
'enable',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces',
[], [],
''' Table of Interface
''',
'interfaces',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'interface-list',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups.Group' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups.Group',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[(1, 32)], [],
''' Group ID
''',
'group_id',
'Cisco-IOS-XR-ipv4-arp-cfg', True),
_MetaInfoClassMember('interface-list', REFERENCE_CLASS, 'InterfaceList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group.InterfaceList',
[], [],
''' List of Interfaces for this Group
''',
'interface_list',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('peers', REFERENCE_CLASS, 'Peers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group.Peers',
[], [],
''' Table of Peer
''',
'peers',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'source_interface',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy.Groups' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups.Group',
[], [],
''' None
''',
'group',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy.Redundancy' : {
'meta_info' : _MetaInfoClass('ArpRedundancy.Redundancy',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Configure parameter for ARP Geo
redundancy. Deletion of this object also causes
deletion of all associated objects under
ArpRedundancy.
''',
'enable',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy.Groups',
[], [],
''' Table of Group
''',
'groups',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'redundancy',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
'ArpRedundancy' : {
'meta_info' : _MetaInfoClass('ArpRedundancy',
False,
[
_MetaInfoClassMember('redundancy', REFERENCE_CLASS, 'Redundancy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg', 'ArpRedundancy.Redundancy',
[], [],
''' Configure parameter for ARP Geo redundancy
''',
'redundancy',
'Cisco-IOS-XR-ipv4-arp-cfg', False),
],
'Cisco-IOS-XR-ipv4-arp-cfg',
'arp-redundancy',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_cfg'
),
},
}
_meta_table['Arpgmp.Vrf.Entries.Entry']['meta_info'].parent =_meta_table['Arpgmp.Vrf.Entries']['meta_info']
_meta_table['Arpgmp.Vrf.Entries']['meta_info'].parent =_meta_table['Arpgmp.Vrf']['meta_info']
_meta_table['Arpgmp.Vrf']['meta_info'].parent =_meta_table['Arpgmp']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group.Peers.Peer']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups.Group.Peers']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces.Interface']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group.Peers']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups.Group']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups.Group']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups.Group']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy.Groups']['meta_info']
_meta_table['ArpRedundancy.Redundancy.Groups']['meta_info'].parent =_meta_table['ArpRedundancy.Redundancy']['meta_info']
_meta_table['ArpRedundancy.Redundancy']['meta_info'].parent =_meta_table['ArpRedundancy']['meta_info']
|
|
import functools
import arrow
import datetime
from typing import List, Dict, Set, Union, Generator
from merakicommons.cache import lazy, lazy_property
from merakicommons.container import searchable, SearchableList, SearchableLazyList, SearchableDictionary
from .. import configuration
from ..data import Region, Platform, Tier, GameType, GameMode, Queue, Side, Season, Lane, Role, Key
from .common import CoreData, CoreDataList, CassiopeiaObject, CassiopeiaGhost, CassiopeiaLazyList, provide_default_region, ghost_load_on
from ..dto import match as dto
from .patch import Patch
from .summoner import Summoner
from .staticdata.champion import Champion
from .staticdata.rune import Rune
from .staticdata.summonerspell import SummonerSpell
from .staticdata.item import Item
from .staticdata.map import Map
def load_match_on_attributeerror(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except AttributeError: # teamId
# The match has only partially loaded this participant and it doesn't have all its data, so load the full match
match = getattr(self, "_{}__match".format(self.__class__.__name__))
if not match._Ghost__is_loaded(MatchData):
match.__load__(MatchData)
match._Ghost__set_loaded(MatchData)
if isinstance(self, Participant):
old_participant = self
elif isinstance(self, ParticipantStats):
old_participant = getattr(self, "_{}__participant".format(self.__class__.__name__))
else:
raise RuntimeError("Impossible!")
for participant in match.participants:
if participant.summoner.name == old_participant.summoner.name:
if isinstance(self, Participant):
self._data[ParticipantData] = participant._data[ParticipantData]
elif isinstance(self, ParticipantStats):
self._data[ParticipantStatsData] = participant.stats._data[ParticipantStatsData]
return method(self, *args, **kwargs)
return method(self, *args, **kwargs)
return wrapper
def _choose_staticdata_version(match):
# If we want to pull the data for the correct version, we need to pull the entire match data.
# However, we can use the creation date (which comes with a matchref) and get the ~ patch and therefore extract the version from the patch.
if configuration.settings.version_from_match is None or configuration.settings.version_from_match == "latest":
version = None # Rather than pick the latest version here, let the obj handle it so it knows which endpoint within the realms data to use
elif configuration.settings.version_from_match == "version" or hasattr(match._data[MatchData], "version"):
version = match.version
version = ".".join(version.split(".")[:2]) + ".1"
elif configuration.settings.version_from_match == "patch":
patch = Patch.from_date(match.creation, region=match.region)
version = patch.majorminor + ".1" # Just always use x.x.1
else:
raise ValueError("Unknown value for setting `version_from_match`:", configuration.settings.version_from_match)
return version
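# Sketch of what the helper above produces under each setting (hedged; the
# version strings are illustrative): with version_from_match == "version", a
# match reporting gameVersion "8.12.231.4342" yields "8.12.1"; with "patch",
# the match creation date is mapped to a Patch and patch.majorminor + ".1" is
# used; with None or "latest", version is left as None so the static-data
# objects pick the newest version themselves.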
##############
# Data Types #
##############
class MatchListData(CoreDataList):
_dto_type = dto.MatchListDto
_renamed = {"champion": "championIds", "queue": "queues", "season": "seasons"}
class PositionData(CoreData):
_renamed = {}
class EventData(CoreData):
_renamed = {"eventType": "type", "teamId": "side", "pointCaptured": "capturedPoint", "assistingParticipantIds": "assistingParticipants", "skillSlot": "skill"}
def __call__(self, **kwargs):
if "position" in kwargs:
self.position = PositionData(**kwargs.pop("position"))
super().__call__(**kwargs)
return self
class ParticipantFrameData(CoreData):
_renamed = {"totalGold": "goldEarned", "minionsKilled": "creepScore", "xp": "experience", "jungleMinionsKilled": "neutralMinionsKilled"}
def __call__(self, **kwargs):
if "position" in kwargs:
self.position = PositionData(**kwargs.pop("position"))
super().__call__(**kwargs)
return self
class FrameData(CoreData):
_renamed = {}
def __call__(self, **kwargs):
if "events" in kwargs:
self.events = [EventData(**event) for event in kwargs.pop("events")]
if "participantFrames" in kwargs:
self.participantFrames = {int(key): ParticipantFrameData(**pframe) for key, pframe in kwargs.pop("participantFrames").items()}
super().__call__(**kwargs)
return self
class TimelineData(CoreData):
_dto_type = dto.TimelineDto
_renamed = {"matchId": "id", "frameInterval": "frame_interval"}
def __call__(self, **kwargs):
if "frames" in kwargs:
self.frames = [FrameData(**frame) for frame in kwargs.pop("frames")]
super().__call__(**kwargs)
return self
class ParticipantTimelineData(CoreData):
_renamed = {"participantId": "id"}
def __call__(self, **kwargs):
#timeline.setCreepScore(getStatTotals(item.getCreepsPerMinDeltas(), durationInSeconds));
#timeline.setCreepScoreDifference(getStatTotals(item.getCsDiffPerMinDeltas(), durationInSeconds));
#timeline.setDamageTaken(getStatTotals(item.getDamageTakenPerMinDeltas(), durationInSeconds));
#timeline.setDamageTakenDifference(getStatTotals(item.getDamageTakenDiffPerMinDeltas(), durationInSeconds));
#timeline.setExperience(getStatTotals(item.getXpPerMinDeltas(), durationInSeconds));
#timeline.setExperienceDifference(getStatTotals(item.getXpDiffPerMinDeltas(), durationInSeconds));
super().__call__(**kwargs)
return self
class ParticipantStatsData(CoreData):
_renamed = {}
class ParticipantData(CoreData):
_renamed = {"participantId": "id", "spell1Id": "summonerSpellDId", "spell2Id": "summonerSpellFId", "highestAchievedSeasonTier": "rankLastSeason", "bot": "isBot", "profileIcon": "profileIconId"}
def __call__(self, **kwargs):
if "stats" in kwargs:
stats = kwargs.pop("stats")
if "perk0" in stats: # Assume all the rest are too
self.runes = {
stats.pop("perk0"): [stats.pop("perk0Var1"), stats.pop("perk0Var2"), stats.pop("perk0Var3")],
stats.pop("perk1"): [stats.pop("perk1Var1"), stats.pop("perk1Var2"), stats.pop("perk1Var3")],
stats.pop("perk2"): [stats.pop("perk2Var1"), stats.pop("perk2Var2"), stats.pop("perk2Var3")],
stats.pop("perk3"): [stats.pop("perk3Var1"), stats.pop("perk3Var2"), stats.pop("perk3Var3")],
stats.pop("perk4"): [stats.pop("perk4Var1"), stats.pop("perk4Var2"), stats.pop("perk4Var3")],
stats.pop("perk5"): [stats.pop("perk5Var1"), stats.pop("perk5Var2"), stats.pop("perk5Var3")]
}
stats.pop("runes", None)
self.stats = ParticipantStatsData(**stats)
if "timeline" in kwargs:
self.timeline = ParticipantTimelineData(**kwargs.pop("timeline"))
if "teamId" in kwargs:
self.side = Side(kwargs.pop("teamId"))
if "player" in kwargs:
for key, value in kwargs.pop("player").items():
kwargs[key] = value
super().__call__(**kwargs)
return self
class TeamData(CoreData):
_renamed = {"dominionVictoryScore": "dominionScore", "firstBaron": "firstBaronKiller", "firstBlood": "firstBloodKiller", "firstDragon": "firstDragonKiller", "firstInhibitor": "firstInhibitorKiller", "firstRiftHerald": "firstRiftHeraldKiller", "firstTower": "firstTowerKiller"}
def __call__(self, **kwargs):
if "bans" in kwargs:
self.bans = [ban["championId"] for ban in kwargs.pop("bans")]
if "win" in kwargs:
self.isWinner = kwargs.pop("win") != "Fail"
if "teamId" in kwargs:
self.side = Side(kwargs.pop("teamId"))
super().__call__(**kwargs)
return self
class MatchReferenceData(CoreData):
_renamed = {"account_id": "accountId", "gameId": "id", "champion": "championId", "teamId": "side", "platformId": "platform"}
def __call__(self, **kwargs):
if "timestamp" in kwargs:
self.creation = arrow.get(kwargs.pop("timestamp") / 1000)
# Set lane and role if they are missing from the data
if "lane" not in kwargs:
kwargs["lane"] = None
if "role" not in kwargs:
kwargs["role"] = None
super().__call__(**kwargs)
return self
class MatchData(CoreData):
_dto_type = dto.MatchDto
_renamed = {"gameId": "id", "gameVersion": "version", "gameMode": "mode", "gameType": "type", "queueId": "queue"}
def __call__(self, **kwargs):
if "gameCreation" in kwargs:
self.creation = arrow.get(kwargs["gameCreation"] / 1000)
if "gameDuration" in kwargs:
self.duration = datetime.timedelta(seconds=kwargs["gameDuration"])
if "participants" in kwargs:
for participant in kwargs["participants"]:
for pid in kwargs["participantIdentities"]:
if participant["participantId"] == pid["participantId"] and "player" in pid:
participant["player"] = pid["player"]
break
self.participants = []
for i in range(len(kwargs["participants"])):
for participant in kwargs["participants"]:
if i == participant["participantId"] - 1:
participant = ParticipantData(**participant)
self.participants.append(participant)
break
assert len(self.participants) == len(kwargs["participants"])
kwargs.pop("participants")
kwargs.pop("participantIdentities")
if "teams" in kwargs:
self.teams = []
for team in kwargs.pop("teams"):
team_side = Side(team["teamId"])
participants = []
for participant in self.participants:
if participant.side is team_side:
participants.append(participant)
self.teams.append(TeamData(**team, participants=participants))
super().__call__(**kwargs)
return self
##############
# Core Types #
##############
class MatchHistory(CassiopeiaLazyList): # type: List[Match]
"""The match history for a summoner. By default, this will return the entire match history."""
_data_types = {MatchListData}
def __init__(self, *, summoner: Summoner, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queues: Set[Queue] = None, seasons: Set[Season] = None, champions: Set[Champion] = None):
assert end_index is None or begin_index is None or end_index > begin_index
if begin_time is not None and end_time is not None and begin_time > end_time:
raise ValueError("`end_time` should be greater than `begin_time`")
kwargs = {"region": summoner.region}
kwargs["queues"] = queues or []
kwargs["seasons"] = seasons or []
champions = champions or []
kwargs["championIds"] = [champion.id if isinstance(champion, Champion) else champion for champion in champions]
kwargs["begin_index"] = begin_index
kwargs["end_index"] = end_index
if begin_time is not None and not isinstance(begin_time, (int, float)):
begin_time = begin_time.timestamp * 1000
kwargs["begin_time"] = begin_time
if end_time is not None and not isinstance(end_time, (int, float)):
end_time = end_time.timestamp * 1000
kwargs["end_time"] = end_time
assert isinstance(summoner, Summoner)
self.__account_id_callable = lambda: summoner.account.id
self.__summoner = summoner
CassiopeiaObject.__init__(self, **kwargs)
@classmethod
def __get_query_from_kwargs__(cls, *, summoner: Summoner, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queues: Set[Queue] = None, seasons: Set[Season] = None, champions: Set[Champion] = None):
assert isinstance(summoner, Summoner)
query = {"region": summoner.region}
query["account.id"] = summoner.account.id
if begin_index is not None:
query["beginIndex"] = begin_index
if end_index is not None:
query["endIndex"] = end_index
if begin_time is not None:
if isinstance(begin_time, arrow.Arrow):
begin_time = begin_time.timestamp * 1000
query["beginTime"] = begin_time
if end_time is not None:
if isinstance(end_time, arrow.Arrow):
end_time = end_time.timestamp * 1000
query["endTime"] = end_time
if queues is not None:
query["queues"] = queues
if seasons is not None:
query["seasons"] = seasons
if champions is not None:
champions = [champion.id if isinstance(champion, Champion) else champion for champion in champions]
query["champion.ids"] = champions
return query
@classmethod
def from_generator(cls, generator: Generator, summoner: Summoner, **kwargs):
self = cls.__new__(cls)
kwargs["summoner"] = summoner
self.__summoner = summoner
CassiopeiaLazyList.__init__(self, generator=generator, **kwargs)
return self
def __call__(self, **kwargs) -> "MatchHistory":
# summoner, begin_index, end_index, begin_time, end_time, queues, seasons, champions
kwargs.setdefault("summoner", self.__summoner)
kwargs.setdefault("begin_index", self.begin_index)
kwargs.setdefault("end_index", self.end_index)
kwargs.setdefault("begin_time", self.begin_time)
kwargs.setdefault("end_time", self.end_time)
kwargs.setdefault("queues", self.queues)
kwargs.setdefault("seasons", self.seasons)
kwargs.setdefault("champions", self.champions)
return MatchHistory(**kwargs)
@property
def _account_id(self):
try:
return self.__account_id
except AttributeError:
self.__account_id = self.__account_id_callable()
del self.__account_id_callable # This releases the reference to the summoner
return self.__account_id
@lazy_property
def region(self) -> Region:
return Region(self._data[MatchListData].region)
@lazy_property
def platform(self) -> Platform:
return self.region.platform
@lazy_property
def queues(self) -> Set[Queue]:
return {Queue(q) for q in self._data[MatchListData].queues}
@lazy_property
def seasons(self) -> Set[Season]:
return {Season(s) for s in self._data[MatchListData].seasons}
@lazy_property
def champions(self) -> Set[Champion]:
return {Champion(id=cid, region=self.region) for cid in self._data[MatchListData].championIds}
@property
def begin_index(self) -> Union[int, None]:
try:
return self._data[MatchListData].beginIndex
except AttributeError:
return None
@property
def end_index(self) -> Union[int, None]:
try:
return self._data[MatchListData].endIndex
except AttributeError:
return None
@property
def begin_time(self) -> arrow.Arrow:
time = self._data[MatchListData].begin_time
if time is not None:
return arrow.get(time / 1000)
@property
def end_time(self) -> arrow.Arrow:
time = self._data[MatchListData].end_time
if time is not None:
return arrow.get(time / 1000)
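# Usage sketch for MatchHistory (hedged; the summoner name, region, and index
# bounds are illustrative and depend on this library's configuration):
#
#     def example_recent_matches():
#         me = Summoner(name="ExamplePlayer", region="NA")   # hypothetical summoner
#         history = MatchHistory(summoner=me, begin_index=0, end_index=20)
#         return [match.id for match in history]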
class Position(CassiopeiaObject):
_data_types = {PositionData}
def __str__(self):
return "<Position ({}, {})>".format(self.x, self.y)
@property
def x(self) -> int:
return self._data[PositionData].x
@property
def y(self) -> int:
return self._data[PositionData].y
@searchable({str: ["type", "tower_type", "ascended_type", "ward_type", "monster_type", "type", "monster_sub_type", "lane_type", "building_type"]})
class Event(CassiopeiaObject):
_data_types = {EventData}
@property
def tower_type(self) -> str:
return self._data[EventData].towerType
@property
def team_id(self) -> int:
return self._data[EventData].teamId
@property
def ascended_type(self) -> str:
return self._data[EventData].ascendedType
@property
def killer_id(self) -> int:
return self._data[EventData].killerId
@property
def level_up_type(self) -> str:
return self._data[EventData].levelUpType
@property
def captured_point(self) -> str:
return self._data[EventData].capturedPoint
@property
def assisting_participants(self) -> List[int]:
return self._data[EventData].assistingParticipants
@property
def ward_type(self) -> str:
return self._data[EventData].wardType
@property
def monster_type(self) -> str:
return self._data[EventData].monsterType
@property
def type(self) -> List[str]:
"""Legal values: CHAMPION_KILL, WARD_PLACED, WARD_KILL, BUILDING_KILL, ELITE_MONSTER_KILL, ITEM_PURCHASED, ITEM_SOLD, ITEM_DESTROYED, ITEM_UNDO, SKILL_LEVEL_UP, ASCENDED_EVENT, CAPTURE_POINT, PORO_KING_SUMMON"""
return self._data[EventData].type
@property
def skill(self) -> int:
return self._data[EventData].skill
@property
def victim_id(self) -> int:
return self._data[EventData].victimId
@property
def timestamp(self) -> datetime.timedelta:
return datetime.timedelta(seconds=self._data[EventData].timestamp/1000)
@property
def after_id(self) -> int:
return self._data[EventData].afterId
@property
def monster_sub_type(self) -> str:
return self._data[EventData].monsterSubType
@property
def lane_type(self) -> str:
return self._data[EventData].laneType
@property
def item_id(self) -> int:
return self._data[EventData].itemId
@property
def participant_id(self) -> int:
return self._data[EventData].participantId
@property
def building_type(self) -> str:
return self._data[EventData].buildingType
@property
def creator_id(self) -> int:
return self._data[EventData].creatorId
@property
def position(self) -> Position:
return Position.from_data(self._data[EventData].position)
@property
def before_id(self) -> int:
return self._data[EventData].beforeId
class ParticipantFrame(CassiopeiaObject):
_data_types = {ParticipantFrameData}
@property
def gold_earned(self) -> int:
return self._data[ParticipantFrameData].goldEarned
@property
def team_score(self) -> int:
return self._data[ParticipantFrameData].teamScore
@property
def participant_id(self) -> int:
return self._data[ParticipantFrameData].participantId
@property
def level(self) -> int:
return self._data[ParticipantFrameData].level
@property
def current_gold(self) -> int:
return self._data[ParticipantFrameData].currentGold
@property
def creep_score(self) -> int:
return self._data[ParticipantFrameData].creepScore
@property
def dominion_score(self) -> int:
return self._data[ParticipantFrameData].dominionScore
@property
def position(self) -> Position:
return Position.from_data(self._data[ParticipantFrameData].position)
@property
def experience(self) -> int:
return self._data[ParticipantFrameData].experience
@property
def neutral_minions_killed(self) -> int:
return self._data[ParticipantFrameData].neutralMinionsKilled
class Frame(CassiopeiaObject):
_data_types = {FrameData}
@property
def timestamp(self) -> datetime.timedelta:
return datetime.timedelta(seconds=self._data[FrameData].timestamp/1000)
@property
def participant_frames(self) -> Dict[int, ParticipantFrame]:
return SearchableDictionary({k: ParticipantFrame.from_data(frame) for k, frame in self._data[FrameData].participantFrames.items()})
@property
def events(self) -> List[Event]:
return SearchableList([Event.from_data(event) for event in self._data[FrameData].events])
class Timeline(CassiopeiaGhost):
_data_types = {TimelineData}
@provide_default_region
def __init__(self, *, id: int = None, region: Union[Region, str] = None):
kwargs = {"region": region, "id": id}
super().__init__(**kwargs)
def __get_query__(self):
return {"region": self.region, "platform": self.platform, "id": self.id}
@property
def id(self):
return self._data[TimelineData].id
@property
def region(self) -> Region:
return Region(self._data[TimelineData].region)
@property
def platform(self) -> Platform:
return self.region.platform
@CassiopeiaGhost.property(TimelineData)
@ghost_load_on
def frames(self) -> List[Frame]:
return SearchableList([Frame.from_data(frame) for frame in self._data[TimelineData].frames])
@CassiopeiaGhost.property(TimelineData)
@ghost_load_on
def frame_interval(self) -> int:
return self._data[TimelineData].frame_interval
class ParticipantTimeline(CassiopeiaObject):
_data_types = {ParticipantTimelineData}
@classmethod
def from_data(cls, data: CoreData, match: "Match"):
self = super().from_data(data)
self.__match = match
return self
@property
def frames(self):
these = []
for frame in self.__match.timeline.frames:
for pid, pframe in frame.participant_frames.items():
if pid == self.id:
these.append(pframe)
return these
@property
def events(self):
my_events = []
timeline = self.__match.timeline
for frame in timeline.frames:
for event in frame.events:
try:
if event.participant_id == self.id:
my_events.append(event)
except AttributeError:
pass
try:
if event.creator_id == self.id:
my_events.append(event)
except AttributeError:
pass
try:
if event.killer_id == self.id:
my_events.append(event)
except AttributeError:
pass
try:
if event.victim_id == self.id:
my_events.append(event)
except AttributeError:
pass
try:
if self.id in event.assisting_participants:
my_events.append(event)
except AttributeError:
pass
return SearchableList(my_events)
@property
def champion_kills(self):
return self.events.filter(lambda event: event.type == "CHAMPION_KILL" and event.killer_id == self.id)
@property
def champion_deaths(self):
return self.events.filter(lambda event: event.type == "CHAMPION_KILL" and event.victim_id == self.id)
@property
def champion_assists(self):
return self.events.filter(lambda event: event.type == "CHAMPION_KILL" and self.id in event.assisting_participants)
@property
def lane(self) -> str:
return Lane.from_match_naming_scheme(self._data[ParticipantTimelineData].lane)
@property
def role(self) -> Union[str, Role]:
role = self._data[ParticipantTimelineData].role
if role == "NONE":
role = None
elif role == "SOLO":
role = "SOLO"
elif role == "DUO":
role = "DUO"
else:
role = Role.from_match_naming_scheme(role)
return role
@property
def id(self) -> int:
return self._data[ParticipantTimelineData].id
@property
def cs_diff_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].csDiffPerMinDeltas
@property
def gold_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].goldPerMinDeltas
@property
def xp_diff_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].xpDiffPerMinDeltas
@property
def creeps_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].creepsPerMinDeltas
@property
def xp_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].xpPerMinDeltas
@property
def damage_taken_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].damageTakenPerMinDeltas
@property
def damage_taken_diff_per_min_deltas(self) -> Dict[str, float]:
return self._data[ParticipantTimelineData].damageTakenDiffPerMinDeltas
@searchable({str: ["items"], Item: ["items"]})
class ParticipantStats(CassiopeiaObject):
_data_types = {ParticipantStatsData}
@classmethod
def from_data(cls, data: ParticipantStatsData, match: "Match", participant: "Participant"):
self = super().from_data(data)
self.__match = match
self.__participant = participant
return self
@property
@load_match_on_attributeerror
def kda(self) -> float:
try:
return (self.kills + self.assists) / self.deaths
except ZeroDivisionError:
return self.kills + self.assists
@property
@load_match_on_attributeerror
def physical_damage_dealt(self) -> int:
return self._data[ParticipantStatsData].physicalDamageDealt
@property
@load_match_on_attributeerror
def magic_damage_dealt(self) -> int:
return self._data[ParticipantStatsData].magicDamageDealt
@property
@load_match_on_attributeerror
def neutral_minions_killed_team_jungle(self) -> int:
return self._data[ParticipantStatsData].neutralMinionsKilledTeamJungle
@property
@load_match_on_attributeerror
def total_player_score(self) -> int:
return self._data[ParticipantStatsData].totalPlayerScore
@property
@load_match_on_attributeerror
def deaths(self) -> int:
return self._data[ParticipantStatsData].deaths
@property
@load_match_on_attributeerror
def win(self) -> bool:
return self._data[ParticipantStatsData].win
@property
@load_match_on_attributeerror
def neutral_minions_killed_enemy_jungle(self) -> int:
return self._data[ParticipantStatsData].neutralMinionsKilledEnemyJungle
@property
@load_match_on_attributeerror
def altars_captured(self) -> int:
return self._data[ParticipantStatsData].altarsCaptured
@property
@load_match_on_attributeerror
def largest_critical_strike(self) -> int:
return self._data[ParticipantStatsData].largestCriticalStrike
@property
@load_match_on_attributeerror
def total_damage_dealt(self) -> int:
return self._data[ParticipantStatsData].totalDamageDealt
@property
@load_match_on_attributeerror
def magic_damage_dealt_to_champions(self) -> int:
return self._data[ParticipantStatsData].magicDamageDealtToChampions
@property
@load_match_on_attributeerror
def vision_wards_bought_in_game(self) -> int:
return self._data[ParticipantStatsData].visionWardsBoughtInGame
@property
@load_match_on_attributeerror
def damage_dealt_to_objectives(self) -> int:
return self._data[ParticipantStatsData].damageDealtToObjectives
@property
@load_match_on_attributeerror
def largest_killing_spree(self) -> int:
return self._data[ParticipantStatsData].largestKillingSpree
@property
@load_match_on_attributeerror
def quadra_kills(self) -> int:
return self._data[ParticipantStatsData].quadraKills
@property
@load_match_on_attributeerror
def team_objective(self) -> int:
return self._data[ParticipantStatsData].teamObjective
@property
@load_match_on_attributeerror
def total_time_crowd_control_dealt(self) -> int:
return self._data[ParticipantStatsData].totalTimeCrowdControlDealt
@property
@load_match_on_attributeerror
def longest_time_spent_living(self) -> int:
return self._data[ParticipantStatsData].longestTimeSpentLiving
@property
@load_match_on_attributeerror
def wards_killed(self) -> int:
return self._data[ParticipantStatsData].wardsKilled
@property
@load_match_on_attributeerror
def first_tower_assist(self) -> bool:
return self._data[ParticipantStatsData].firstTowerAssist
@property
@load_match_on_attributeerror
def first_tower_kill(self) -> bool:
return self._data[ParticipantStatsData].firstTowerKill
@lazy_property
@load_match_on_attributeerror
def items(self) -> List[Item]:
ids = [self._data[ParticipantStatsData].item0,
self._data[ParticipantStatsData].item1,
self._data[ParticipantStatsData].item2,
self._data[ParticipantStatsData].item3,
self._data[ParticipantStatsData].item4,
self._data[ParticipantStatsData].item5,
self._data[ParticipantStatsData].item6
]
version = _choose_staticdata_version(self.__match)
return SearchableList([Item(id=id, version=version, region=self.__match.region) if id else None for id in ids])
@property
@load_match_on_attributeerror
def first_blood_assist(self) -> bool:
return self._data[ParticipantStatsData].firstBloodAssist
@property
@load_match_on_attributeerror
def vision_score(self) -> int:
return self._data[ParticipantStatsData].visionScore
@property
@load_match_on_attributeerror
def wards_placed(self) -> int:
return self._data[ParticipantStatsData].wardsPlaced
@property
@load_match_on_attributeerror
def turret_kills(self) -> int:
return self._data[ParticipantStatsData].turretKills
@property
@load_match_on_attributeerror
def triple_kills(self) -> int:
return self._data[ParticipantStatsData].tripleKills
@property
@load_match_on_attributeerror
def damage_self_mitigated(self) -> int:
return self._data[ParticipantStatsData].damageSelfMitigated
@property
@load_match_on_attributeerror
def level(self) -> int:
return self._data[ParticipantStatsData].champLevel
@property
@load_match_on_attributeerror
def node_neutralize_assist(self) -> int:
return self._data[ParticipantStatsData].nodeNeutralizeAssist
@property
@load_match_on_attributeerror
def first_inhibitor_kill(self) -> bool:
return self._data[ParticipantStatsData].firstInhibitorKill
@property
@load_match_on_attributeerror
def gold_earned(self) -> int:
return self._data[ParticipantStatsData].goldEarned
@property
@load_match_on_attributeerror
def magical_damage_taken(self) -> int:
return self._data[ParticipantStatsData].magicalDamageTaken
@property
@load_match_on_attributeerror
def kills(self) -> int:
return self._data[ParticipantStatsData].kills
@property
@load_match_on_attributeerror
def double_kills(self) -> int:
return self._data[ParticipantStatsData].doubleKills
@property
@load_match_on_attributeerror
def node_capture_assist(self) -> int:
return self._data[ParticipantStatsData].nodeCaptureAssist
@property
@load_match_on_attributeerror
def true_damage_taken(self) -> int:
return self._data[ParticipantStatsData].trueDamageTaken
@property
@load_match_on_attributeerror
def node_neutralize(self) -> int:
return self._data[ParticipantStatsData].nodeNeutralize
@property
@load_match_on_attributeerror
def first_inhibitor_assist(self) -> bool:
return self._data[ParticipantStatsData].firstInhibitorAssist
@property
@load_match_on_attributeerror
def assists(self) -> int:
return self._data[ParticipantStatsData].assists
@property
@load_match_on_attributeerror
def unreal_kills(self) -> int:
return self._data[ParticipantStatsData].unrealKills
@property
@load_match_on_attributeerror
def neutral_minions_killed(self) -> int:
return self._data[ParticipantStatsData].neutralMinionsKilled
@property
@load_match_on_attributeerror
def objective_player_score(self) -> int:
return self._data[ParticipantStatsData].objectivePlayerScore
@property
@load_match_on_attributeerror
def combat_player_score(self) -> int:
return self._data[ParticipantStatsData].combatPlayerScore
@property
@load_match_on_attributeerror
def damage_dealt_to_turrets(self) -> int:
return self._data[ParticipantStatsData].damageDealtToTurrets
@property
@load_match_on_attributeerror
def altars_neutralized(self) -> int:
return self._data[ParticipantStatsData].altarsNeutralized
@property
@load_match_on_attributeerror
def physical_damage_dealt_to_champions(self) -> int:
return self._data[ParticipantStatsData].physicalDamageDealtToChampions
@property
@load_match_on_attributeerror
def gold_spent(self) -> int:
return self._data[ParticipantStatsData].goldSpent
@property
@load_match_on_attributeerror
def true_damage_dealt(self) -> int:
return self._data[ParticipantStatsData].trueDamageDealt
@property
@load_match_on_attributeerror
def true_damage_dealt_to_champions(self) -> int:
return self._data[ParticipantStatsData].trueDamageDealtToChampions
@property
@load_match_on_attributeerror
def id(self) -> int:
return self._data[ParticipantStatsData].id
@property
@load_match_on_attributeerror
def penta_kills(self) -> int:
return self._data[ParticipantStatsData].pentaKills
@property
@load_match_on_attributeerror
def total_heal(self) -> int:
return self._data[ParticipantStatsData].totalHeal
@property
@load_match_on_attributeerror
def total_minions_killed(self) -> int:
return self._data[ParticipantStatsData].totalMinionsKilled
@property
@load_match_on_attributeerror
def first_blood_kill(self) -> bool:
return self._data[ParticipantStatsData].firstBloodKill
@property
@load_match_on_attributeerror
def node_capture(self) -> int:
return self._data[ParticipantStatsData].nodeCapture
@property
@load_match_on_attributeerror
def largest_multi_kill(self) -> int:
return self._data[ParticipantStatsData].largestMultiKill
@property
@load_match_on_attributeerror
def sight_wards_bought_in_game(self) -> int:
return self._data[ParticipantStatsData].sightWardsBoughtInGame
@property
@load_match_on_attributeerror
def total_damage_dealt_to_champions(self) -> int:
return self._data[ParticipantStatsData].totalDamageDealtToChampions
@property
@load_match_on_attributeerror
def total_units_healed(self) -> int:
return self._data[ParticipantStatsData].totalUnitsHealed
@property
@load_match_on_attributeerror
def inhibitor_kills(self) -> int:
return self._data[ParticipantStatsData].inhibitorKills
@property
@load_match_on_attributeerror
def total_score_rank(self) -> int:
return self._data[ParticipantStatsData].totalScoreRank
@property
@load_match_on_attributeerror
def total_damage_taken(self) -> int:
return self._data[ParticipantStatsData].totalDamageTaken
@property
@load_match_on_attributeerror
def killing_sprees(self) -> int:
return self._data[ParticipantStatsData].killingSprees
@property
@load_match_on_attributeerror
def time_CCing_others(self) -> int:
return self._data[ParticipantStatsData].timeCCingOthers
@property
@load_match_on_attributeerror
def physical_damage_taken(self) -> int:
return self._data[ParticipantStatsData].physicalDamageTaken
@searchable({str: ["summoner", "champion", "stats", "runes", "side", "summoner_spell_d", "summoner_spell_f"], Summoner: ["summoner"], Champion: ["champion"], Side: ["side"], Rune: ["runes"], SummonerSpell: ["summoner_spell_d", "summoner_spell_f"]})
class Participant(CassiopeiaObject):
_data_types = {ParticipantData}
@classmethod
def from_data(cls, data: CoreData, match: "Match"):
self = super().from_data(data)
self.__match = match
return self
@property
def version(self) -> str:
version = self.__match.version
version = version.split(".")[0:2]
version = ".".join(version) + ".1" # Always use x.x.1 because I don't know how to figure out what the last version number should be.
return version
@property
def lane(self):
return self.timeline.lane
@property
def role(self):
return self.timeline.role
@property
def skill_order(self):
skill_events = self.timeline.events.filter(lambda event: event.type == "SKILL_LEVEL_UP")
skill_events.sort(key=lambda event: event.timestamp)
skills = [event.skill - 1 for event in skill_events]
spells = [self.champion.spells[Key("Q")], self.champion.spells[Key("W")], self.champion.spells[Key("E")], self.champion.spells[Key("R")]]
skills = [spells[skill] for skill in skills]
return skills
@lazy_property
@load_match_on_attributeerror
def stats(self) -> ParticipantStats:
return ParticipantStats.from_data(self._data[ParticipantData].stats, match=self.__match, participant=self)
@property
def id(self) -> int:
return self._data[ParticipantData].id
@lazy_property
@load_match_on_attributeerror
def is_bot(self) -> bool:
return self._data[ParticipantData].isBot
@lazy_property
@load_match_on_attributeerror
def runes(self) -> Dict["Rune", int]:
version = _choose_staticdata_version(self.__match)
return SearchableDictionary({Rune(id=rune_id, version=version, region=self.__match.region): perk_vars
for rune_id, perk_vars in self._data[ParticipantData].runes.items()})
@lazy_property
@load_match_on_attributeerror
def timeline(self) -> ParticipantTimeline:
timeline = ParticipantTimeline.from_data(self._data[ParticipantData].timeline, match=self.__match)
timeline(id=self.id)
return timeline
@lazy_property
@load_match_on_attributeerror
def side(self) -> Side:
return Side(self._data[ParticipantData].side)
@lazy_property
@load_match_on_attributeerror
def summoner_spell_d(self) -> SummonerSpell:
version = _choose_staticdata_version(self.__match)
return SummonerSpell(id=self._data[ParticipantData].summonerSpellDId, version=version, region=self.__match.region)
@lazy_property
@load_match_on_attributeerror
def summoner_spell_f(self) -> SummonerSpell:
version = _choose_staticdata_version(self.__match)
return SummonerSpell(id=self._data[ParticipantData].summonerSpellFId, version=version, region=self.__match.region)
@lazy_property
@load_match_on_attributeerror
def rank_last_season(self) -> Tier:
return Tier(self._data[ParticipantData].rankLastSeason)
@lazy_property
@load_match_on_attributeerror
def champion(self) -> "Champion":
# See ParticipantStats for info
version = _choose_staticdata_version(self.__match)
return Champion(id=self._data[ParticipantData].championId, version=version, region=self.__match.region)
# All the Player data from ParticipantIdentities.player is contained in the Summoner class.
# The non-current accountId and platformId should never be relevant/used, and can be deleted from our type system.
# See: https://discussion.developer.riotgames.com/questions/1713/is-there-any-scenario-where-accountid-should-be-us.html
@lazy_property
def summoner(self) -> Summoner:
kwargs = {}
try:
kwargs["id"] = self._data[ParticipantData].summonerId
except AttributeError:
pass
try:
kwargs["name"] = self._data[ParticipantData].summonerName
except AttributeError:
pass
kwargs["account"] = self._data[ParticipantData].currentAccountId
kwargs["region"] = Platform(self._data[ParticipantData].currentPlatformId).region
summoner = Summoner(**kwargs)
try:
summoner(profileIconId=self._data[ParticipantData].profileIconId)
except AttributeError:
pass
return summoner
@property
def team(self) -> "Team":
if self.side == Side.blue:
return self.__match.blue_team
else:
return self.__match.red_team
@property
def enemy_team(self) -> "Team":
if self.side == Side.blue:
return self.__match.red_team
else:
return self.__match.blue_team
@searchable({str: ["participants"], bool: ["win"]})
class Team(CassiopeiaObject):
_data_types = {TeamData}
@classmethod
def from_data(cls, data: CoreData, match: "Match"):
self = super().from_data(data)
self.__match = match
return self
@property
def first_dragon(self) -> bool:
return self._data[TeamData].firstDragonKiller
@property
def first_inhibitor(self) -> bool:
return self._data[TeamData].firstInhibitorKiller
@property
def first_rift_herald(self) -> bool:
return self._data[TeamData].firstRiftHeraldKiller
@property
def first_baron(self) -> bool:
return self._data[TeamData].firstBaronKiller
@property
def first_tower(self) -> bool:
return self._data[TeamData].firstTowerKiller
@property
def first_blood(self) -> bool:
return self._data[TeamData].firstBloodKiller
@property
def bans(self) -> List["Champion"]:
return [Champion(id=champion_id, version=self.__match.version, region=self.__match.region) if champion_id != -1 else None for champion_id in self._data[TeamData].bans]
@property
def baron_kills(self) -> int:
return self._data[TeamData].baronKills
@property
def rift_herald_kills(self) -> int:
return self._data[TeamData].riftHeraldKills
@property
def vilemaw_kills(self) -> int:
return self._data[TeamData].vilemawKills
@property
def inhibitor_kills(self) -> int:
return self._data[TeamData].inhibitorKills
@property
def tower_kills(self) -> int:
return self._data[TeamData].towerKills
@property
def dragon_kills(self) -> int:
return self._data[TeamData].dragonKills
@property
def side(self) -> Side:
return self._data[TeamData].side
@property
def dominion_score(self) -> int:
return self._data[TeamData].dominionScore
@property
def win(self) -> bool:
return self._data[TeamData].isWinner
@lazy_property
def participants(self) -> List[Participant]:
return SearchableList([Participant.from_data(p, match=self.__match) for p in self._data[TeamData].participants])
@searchable({str: ["participants", "region", "platform", "season", "queue", "mode", "map", "type"], Region: ["region"], Platform: ["platform"], Season: ["season"], Queue: ["queue"], GameMode: ["mode"], Map: ["map"], GameType: ["type"], Item: ["participants"], Champion: ["participants"], Patch: ["patch"]})
class Match(CassiopeiaGhost):
_data_types = {MatchData}
@provide_default_region
def __init__(self, *, id: int = None, region: Union[Region, str] = None):
kwargs = {"region": region, "id": id}
super().__init__(**kwargs)
self.__participants = [] # For lazy-loading the participants in a special way
def __get_query__(self):
return {"region": self.region, "platform": self.platform, "id": self.id}
@classmethod
def from_match_reference(cls, ref: MatchReferenceData):
instance = cls(id=ref.id, region=ref.region)
# The below line is necessary because it's possible to pull this match from the cache (which has Match core objects in it).
# In that case, the data will already be loaded and we don't want to overwrite anything.
if not hasattr(instance._data[MatchData], "participants"):
participant = {"participantId": 1, "championId": ref.championId, "stats": {"lane": ref.lane, "role": ref.role}}
player = {"participantId": 1, "currentAccountId": ref.accountId, "currentPlatformId": ref.platform}
instance(season=ref.season, queue=ref.queue, creation=ref.creation)
instance._data[MatchData](participants=[participant],
participantIdentities=[{"participantId": 1, "player": player, "bot": False}])
return instance
def __eq__(self, other: "Match"):
if not isinstance(other, Match) or self.region != other.region:
return False
return self.id == other.id
def __str__(self):
region = self.region
id_ = self.id
return "Match(id={id_}, region='{region}')".format(id_=id_, region=region.value)
__hash__ = CassiopeiaGhost.__hash__
@lazy_property
def region(self) -> Region:
"""The region for this match."""
return Region(self._data[MatchData].region)
@property
def platform(self) -> Platform:
"""The platform for this match."""
return self.region.platform
@property
def id(self) -> int:
return self._data[MatchData].id
@lazy_property
def timeline(self) -> Timeline:
return Timeline(id=self.id, region=self.region.value)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def season(self) -> Season:
return Season.from_id(self._data[MatchData].seasonId)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def queue(self) -> Queue:
return Queue.from_id(self._data[MatchData].queue)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
# This method is lazy-loaded in a special way because of its unique behavior
def participants(self) -> List[Participant]:
# This is a complicated function because we don't want to load all the participants if the only one the user cares about is the one loaded from a match reference
def generate_participants(match):
if not hasattr(match._data[MatchData], "participants"):
empty_match = True
else:
empty_match = False
# If a participant was provided from a matchref, yield that first
yielded_one = False
if not empty_match and len(match._data[MatchData].participants) == 1:
yielded_one = True
try:
yield match.__participants[0]
except IndexError:
p = match._data[MatchData].participants[0]
participant = Participant.from_data(p, match=match)
match.__participants.append(participant)
yield participant
# Create all the participants if any haven't been created yet.
# Note that it's important to overwrite the one from the matchref if it was loaded because we have more data after we load the full match.
if empty_match or yielded_one or len(match.__participants) < len(match._data[MatchData].participants):
if not match._Ghost__is_loaded(MatchData):
match.__load__(MatchData)
match._Ghost__set_loaded(MatchData) # __load__ doesn't trigger __set_loaded.
for i, p in enumerate(match._data[MatchData].participants):
participant = Participant.from_data(p, match=match)
# If we already have this participant in the list, replace it so it stays in the same position
for j, pold in enumerate(match.__participants):
if hasattr(pold._data[ParticipantData], "currentAccountId") and hasattr(participant._data[ParticipantData], "currentAccountId") and pold._data[ParticipantData].currentAccountId == participant._data[ParticipantData].currentAccountId:
match.__participants[j] = participant
break
else:
match.__participants.append(participant)
# Yield the rest of the participants
for participant in match.__participants[yielded_one:]:
yield participant
return SearchableLazyList(generate_participants(self))
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def teams(self) -> List[Team]:
return [Team.from_data(t, match=self) for i, t in enumerate(self._data[MatchData].teams)]
@property
def red_team(self) -> Team:
if self.teams[0].side is Side.red:
return self.teams[0]
else:
return self.teams[1]
@property
def blue_team(self) -> Team:
if self.teams[0].side is Side.blue:
return self.teams[0]
else:
return self.teams[1]
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
def version(self) -> str:
return self._data[MatchData].version
@property
def patch(self) -> Patch:
version = ".".join(self.version.split(".")[:2])
patch = Patch.from_str(version, region=self.region)
return patch
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def mode(self) -> GameMode:
return GameMode(self._data[MatchData].mode)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def map(self) -> Map:
version = _choose_staticdata_version(self)
return Map(id=self._data[MatchData].mapId, region=self.region, version=version)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def type(self) -> GameType:
return GameType(self._data[MatchData].type)
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def duration(self) -> datetime.timedelta:
return self._data[MatchData].duration
@CassiopeiaGhost.property(MatchData)
@ghost_load_on
@lazy
def creation(self) -> arrow.Arrow:
return self._data[MatchData].creation
@property
def is_remake(self) -> bool:
return self.duration < datetime.timedelta(minutes=5)
def kills_heatmap(self):
if self.map.name == "Summoner's Rift":
rx0, ry0, rx1, ry1 = 0, 0, 14820, 14881
elif self.map.name == "Howling Abyss":
rx0, ry0, rx1, ry1 = -28, -19, 12849, 12858
else:
raise NotImplementedError
imx0, imy0, imx1, imy1 = self.map.image.image.getbbox()
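# Linearly rescale an in-game (x, y) position from the map's coordinate
# bounds (rx0..rx1, ry0..ry1) into pixel coordinates on the map image.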
def position_to_map_image_coords(position):
x, y = position.x, position.y
x -= rx0
x /= (rx1 - rx0)
x *= (imx1 - imx0)
y -= ry0
y /= (ry1 - ry0)
y *= (imy1 - imy0)
return x, y
import matplotlib.pyplot as plt
size = 8
plt.figure(figsize=(size, size))
plt.imshow(self.map.image.image.rotate(-90))
for p in self.participants:
for kill in p.timeline.champion_kills:
x, y = position_to_map_image_coords(kill.position)
if p.team.side == Side.blue:
plt.scatter([x], [y], c="b", s=size * 10)
else:
plt.scatter([x], [y], c="r", s=size * 10)
plt.axis('off')
plt.show()
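# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the Match/Participant/Team objects above
# might be consumed through the public cassiopeia API; the match id, region,
# and printed fields are hypothetical placeholders.
#
#   import cassiopeia as cass
#   match = cass.get_match(id=1234567890, region="NA")   # hypothetical id
#   for p in match.participants:                         # lazily loads the full match on demand
#       print(p.summoner.name, p.champion.name, p.stats.kills, p.stats.assists)
#   print("Blue team won:", match.blue_team.win)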
|
|
#! /usr/bin/env python
__author__ = "Fabio Giuseppe Di Benedetto"
import socket
import select
import Queue
import re
import json
import subprocess
import rospy
from robotics_msgs.msg import Robot_Event
from robotics_msgs.msg import CB_Event
from robotics_msgs.srv import FIROS_Info, FIROS_InfoResponse
class RCMDriver:
DEFAULT_NODE_NAME = "rcmdriver"
DEFAULT_OUT_TOPIC = "/rcm/robot_event"
DEFAULT_IN_TOPIC = "/firos/cb_event"
DEFAULT_SERVICE = "/rcm/firos_info"
DEFAULT_QUEUE_SIZE = 10
#
PARAM_PORT = "port="
# these are constants duplicated from RCMDriver and used to deal with that
# component
COMMAND_KEY = "rcm_d_cmd"
NOTIFY_CONNECTION = "r_connection"
NOTIFY_DISCONNECTION = "r_disconnection"
R_NAME_KEY = "r_name"
ENTITY_CREATED = "e_created"
ENTITY_REMOVED = "e_removed"
ASK_INFO = "r_info"
R_INFO_KEY = "r_info_data"
# for topics extraction
RT_NAME_KEY = "ros_topic_name"
RT_MSG_KEY = "ros_topic_message"
# commonly used flags
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
READ_WRITE = READ_ONLY | select.POLLOUT
DEFAULT_DRIVER_PORT = 9998
def __init__(self, port=DEFAULT_DRIVER_PORT):
rospy.init_node(self.DEFAULT_NODE_NAME)
argv = rospy.myargv()
p_port = None
if len(argv) > 1:
# there are parameters passed
for arg in argv:
if self.PARAM_PORT in arg:
tmp = arg.replace(self.PARAM_PORT, "")
if tmp.isdigit():
# a non-numeric value means the port is invalid, so we leave it unset
p_port = tmp
rospy.loginfo("rcm driver listening to rcm platform node on port %s" % (p_port if p_port else port))
self.rcm_driver_s_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.rcm_driver_s_server.setblocking(0)
self.rcm_driver_s_server_address = ("localhost", int(p_port) if p_port else port)
self.rcm_driver_s_server.bind(self.rcm_driver_s_server_address)
# listen for incoming connection (only one)
self.rcm_driver_s_server.listen(1)
self.connection = None
self.client_address = None
self._buf_size = 1024
self.rcm_re_publisher = None
self.rcm_fi_service = None
self.robot_name = None
self._robot_status = Queue.Queue()
self._entity_status = Queue.Queue()
# we get a polling object
self.po = select.poll()
# register the server on read only events
self.po.register(self.rcm_driver_s_server, self.READ_ONLY)
# map file descriptors to socket objects
self.fd_to_socket = {self.rcm_driver_s_server.fileno(): self.rcm_driver_s_server}
rt_name_extractor_str = '(?P<%s>[a-zA-Z0-9\_\/]+)' % self.RT_NAME_KEY
rt_msg_extractor_str = '(?P<%s>[a-zA-Z0-9\_\/]+)' % self.RT_MSG_KEY
elements_str = '[0-9]+'
pub_str = 'publisher[s]?'
sub_str = 'subscriber[s]?'
rt_publisher_extractor_str = ' * %s \[%s\] %s %s' % \
(rt_name_extractor_str, rt_msg_extractor_str, elements_str, pub_str)
rt_subscriber_extractor_str = ' * %s \[%s\] %s %s' % \
(rt_name_extractor_str, rt_msg_extractor_str, elements_str, sub_str)
self.rt_publisher_extractor_re = re.compile(rt_publisher_extractor_str)
self.rt_subscriber_extractor_re = re.compile(rt_subscriber_extractor_str)
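# Illustrative (assumed) shape of the `rostopic list -v` lines the two
# extractors above are intended to match, e.g.:
#    * /rcm/robot_event [robotics_msgs/Robot_Event] 1 publisher
#    * /firos/cb_event [robotics_msgs/CB_Event] 1 subscriber
# each match exposes the topic name and message type through the named
# groups RT_NAME_KEY and RT_MSG_KEY.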
def cb_event_handler(self, data):
if data.entity_name != self.robot_name:
rospy.loginfo("firos changed the robot name from %s to %s" % (self.robot_name, data.entity_name))
if data.entity_status == data.CREATED:
# we notify the entity creation so that the node can go on (watchdog can start
# to ping the other end to check the connection status)
msg_dict = {self.COMMAND_KEY: self.ENTITY_CREATED, self.R_NAME_KEY: self.robot_name}
else:
# we notify the entity deletion so that the node can go on in the clean up
msg_dict = {self.COMMAND_KEY: self.ENTITY_REMOVED, self.R_NAME_KEY: self.robot_name}
request = json.dumps(msg_dict)
rospy.loginfo("-- TMP -- firos response about entity: %s" % request)
self._entity_status.put_nowait(request)
def firos_info_handler(self, request):
if request.instance_name != self.robot_name:
rospy.loginfo("firos changed the robot name from %s to %s" % (self.robot_name, request.instance_name))
rospy.loginfo("-- TMP -- firos asked info about '%s'" % request.instance_name)
cmd = ['rostopic', 'list', '-v']
tl_p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tl_p_out, tl_p_err = tl_p.communicate()
rtn_code = tl_p.poll()
topics = []
if not rtn_code:
# no errors
m_pub_list = self.rt_publisher_extractor_re.finditer(tl_p_out)
if m_pub_list is not None:
for m in m_pub_list:
if not (m.group(self.RT_NAME_KEY) == self.DEFAULT_OUT_TOPIC
or m.group(self.RT_NAME_KEY) == self.DEFAULT_IN_TOPIC):
# we hide the topics used internally between rcm and firos
topics.append({"name": m.group(self.RT_NAME_KEY),
"msg": m.group(self.RT_MSG_KEY),
"type": "publisher"})
m_sub_list = self.rt_subscriber_extractor_re.finditer(tl_p_out)
if m_sub_list is not None:
for m in m_sub_list:
if not (m.group(self.RT_NAME_KEY) == self.DEFAULT_OUT_TOPIC
or m.group(self.RT_NAME_KEY) == self.DEFAULT_IN_TOPIC):
# we hide the topics used internally between rcm and firos
topics.append({"name": m.group(self.RT_NAME_KEY),
"msg": m.group(self.RT_MSG_KEY),
"type": "subscriber"})
result = json.dumps(topics)
rospy.loginfo("-- TMP -- rcm_driver provide to firos the response: %s" % result)
return FIROS_InfoResponse(result)
def run(self):
rospy.loginfo("---- rcm driver starting ----")
# enable publishing Robot_Event messages on DEFAULT_OUT_TOPIC (/rcm/robot_event)
self.rcm_re_publisher = rospy.Publisher(self.DEFAULT_OUT_TOPIC, Robot_Event,
queue_size=self.DEFAULT_QUEUE_SIZE)
# enable answering requests of FIROS_Info as service provider of DEFAULT_SERVICE (/rcm/firos_info)
self.rcm_fi_service = rospy.Service(self.DEFAULT_SERVICE, FIROS_Info, self.firos_info_handler)
# enable listening to DEFAULT_IN_TOPIC (/firos/cb_event) CB_Event messages
rospy.Subscriber(self.DEFAULT_IN_TOPIC, CB_Event, callback=self.cb_event_handler)
# 10hz
rate = rospy.Rate(10)
while not rospy.is_shutdown():
# we poll with a timeout of 0 milliseconds and rely on rate.sleep() at the
# end of the loop to pace the iterations
events = self.po.poll(0)
for fd, flag in events:
# retrieve the actual socket from its file descriptor
s = self.fd_to_socket[fd]
if flag & (select.POLLIN | select.POLLPRI):
# handle inputs
if s is self.rcm_driver_s_server:
# input for server means that someone is asking for connection
self.connection, self.client_address = s.accept()
rospy.loginfo("new connection from %s" % str(self.client_address))
self.connection.setblocking(0)
self.fd_to_socket[self.connection.fileno()] = self.connection
self.po.register(self.connection, self.READ_ONLY)
else:
# input for connection means that the client is sending something
# to the server
data = s.recv(self._buf_size)
if data:
# a readable client socket has data
rospy.loginfo("received '%s' from %s" % (data, s.getpeername()))
# try to decode
try:
request = json.loads(data)
except Exception as e:
# an error here means the message is malformed (or was split across more
# than one recv() call)
rospy.logerr("received non-json msg from rcm platform node: %s" % e)
else:
# we could decode the message
if request[self.COMMAND_KEY] == self.NOTIFY_CONNECTION \
or request[self.COMMAND_KEY] == self.NOTIFY_DISCONNECTION:
# this is the case of requests received about the robot status
topic_msg = Robot_Event()
topic_msg.instance_name = self.robot_name = request[self.R_NAME_KEY]
if request[self.COMMAND_KEY] == self.NOTIFY_CONNECTION:
topic_msg.instance_status = topic_msg.CONNECTED
rospy.loginfo("publishing the connection of %s" % topic_msg.instance_name)
# only once the robot connection has been received might we need an output
# channel, so we also register this socket for writing
self.po.modify(s, self.READ_WRITE)
elif request[self.COMMAND_KEY] == self.NOTIFY_DISCONNECTION:
topic_msg.instance_status = topic_msg.DISCONNECTED
rospy.loginfo("publishing the disconnection of %s" % topic_msg.instance_name)
self.rcm_re_publisher.publish(topic_msg)
elif flag & select.POLLHUP:
# handle client hung up
rospy.loginfo("closing %s after receiving HUP" % self.client_address)
self.po.unregister(s)
try:
s.shutdown(socket.SHUT_RDWR)
except:
pass
finally:
s.close()
elif flag & select.POLLOUT:
# handle output
# rcm driver can send only the entity status
try:
es_response = self._entity_status.get_nowait()
except Queue.Empty:
# no messages of entity status
pass
else:
rospy.loginfo("sending '%s' to %s" % (es_response, s.getpeername()))
s.sendall(es_response)
elif flag & select.POLLERR:
# handle error
rospy.loginfo("handling exceptional condition for %s" % s.getpeername())
self.po.unregister(s)
try:
s.shutdown(socket.SHUT_RDWR)
except:
pass
finally:
s.close()
rate.sleep()
rospy.loginfo("---- rcm driver ended ----")
if __name__ == '__main__':
try:
rcm_d = RCMDriver()
rcm_d.run()
except rospy.ROSInterruptException:
pass
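# --- Illustrative client sketch (not part of this node) ---
# A minimal, hedged example of what the rcm platform node might send to this
# driver over its localhost socket; the wire format on the platform side is
# assumed here, only the keys and values mirror the constants defined above.
#
#   import json, socket
#   s = socket.create_connection(("localhost", RCMDriver.DEFAULT_DRIVER_PORT))
#   s.sendall(json.dumps({RCMDriver.COMMAND_KEY: RCMDriver.NOTIFY_CONNECTION,
#                         RCMDriver.R_NAME_KEY: "turtlebot_1"}))  # hypothetical robot name
#   print(s.recv(1024))  # eventually the e_created / e_removed notification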
|
|
"""Test the Nanoleaf config flow."""
from __future__ import annotations
from unittest.mock import AsyncMock, MagicMock, patch
from aionanoleaf import InvalidToken, NanoleafException, Unauthorized, Unavailable
import pytest
from homeassistant import config_entries
from homeassistant.components.nanoleaf.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_TOKEN
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
TEST_NAME = "Canvas ADF9"
TEST_HOST = "192.168.0.100"
TEST_OTHER_HOST = "192.168.0.200"
TEST_TOKEN = "R34F1c92FNv3pcZs4di17RxGqiLSwHM"
TEST_OTHER_TOKEN = "Qs4dxGcHR34l29RF1c92FgiLQBt3pcM"
TEST_DEVICE_ID = "5E:2E:EA:XX:XX:XX"
TEST_OTHER_DEVICE_ID = "5E:2E:EA:YY:YY:YY"
def _mock_nanoleaf(
host: str = TEST_HOST,
auth_token: str = TEST_TOKEN,
authorize_error: Exception | None = None,
get_info_error: Exception | None = None,
):
nanoleaf = MagicMock()
nanoleaf.name = TEST_NAME
nanoleaf.host = host
nanoleaf.auth_token = auth_token
nanoleaf.authorize = AsyncMock(side_effect=authorize_error)
nanoleaf.get_info = AsyncMock(side_effect=get_info_error)
return nanoleaf
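# _mock_nanoleaf is patched in place of the aionanoleaf Nanoleaf class in the
# tests below, so authorize()/get_info() can be made to raise a chosen error,
# e.g.:
#
#   with patch(
#       "homeassistant.components.nanoleaf.config_flow.Nanoleaf",
#       return_value=_mock_nanoleaf(authorize_error=Unavailable()),
#   ):
#       ...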
async def test_user_unavailable_user_step_link_step(hass: HomeAssistant) -> None:
"""Test we handle Unavailable in user and link step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
assert not result2["last_step"]
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable,
):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect"
@pytest.mark.parametrize(
"error, reason",
[
(Unavailable, "cannot_connect"),
(InvalidToken, "invalid_token"),
(Exception, "unknown"),
],
)
async def test_user_error_setup_finish(
hass: HomeAssistant, error: Exception, reason: str
) -> None:
"""Test abort flow if on error in setup_finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
), patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.get_info",
side_effect=error,
):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result3["type"] == "abort"
assert result3["reason"] == reason
async def test_user_not_authorizing_new_tokens_user_step_link_step(
hass: HomeAssistant,
) -> None:
"""Test we handle NotAuthorizingNewTokens in user step and link step."""
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(authorize_error=Unauthorized()),
) as mock_nanoleaf, patch(
"homeassistant.components.nanoleaf.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
assert result["step_id"] == "user"
assert not result["last_step"]
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "link"
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "link"
result4 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result4["type"] == "form"
assert result4["errors"] == {"base": "not_allowing_new_tokens"}
assert result4["step_id"] == "link"
mock_nanoleaf.return_value.authorize.side_effect = None
result5 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result5["type"] == "create_entry"
assert result5["title"] == TEST_NAME
assert result5["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_exception_user_step(hass: HomeAssistant) -> None:
"""Test we handle Exception errors in user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(authorize_error=Exception()),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
assert not result2["last_step"]
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(),
) as mock_nanoleaf:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result3["step_id"] == "link"
mock_nanoleaf.return_value.authorize.side_effect = Exception()
result4 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result4["type"] == "form"
assert result4["step_id"] == "link"
assert result4["errors"] == {"base": "unknown"}
mock_nanoleaf.return_value.authorize.side_effect = None
mock_nanoleaf.return_value.get_info.side_effect = Exception()
result5 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result5["type"] == "abort"
assert result5["reason"] == "unknown"
@pytest.mark.parametrize(
"source, type_in_discovery_info",
[
(config_entries.SOURCE_HOMEKIT, "_hap._tcp.local"),
(config_entries.SOURCE_ZEROCONF, "_nanoleafms._tcp.local"),
(config_entries.SOURCE_ZEROCONF, "_nanoleafapi._tcp.local."),
],
)
async def test_discovery_link_unavailable(
hass: HomeAssistant, source: str, type_in_discovery_info: str
) -> None:
"""Test discovery and abort if device is unavailable."""
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.get_info",
), patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": source},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{type_in_discovery_info}",
"type": type_in_discovery_info,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {"name": TEST_NAME}
assert context["unique_id"] == TEST_NAME
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable,
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_reauth(hass: HomeAssistant) -> None:
"""Test Nanoleaf reauth flow."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_NAME,
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_OTHER_TOKEN},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(),
), patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": entry.entry_id,
"unique_id": entry.unique_id,
},
data=entry.data,
)
assert result["type"] == "form"
assert result["step_id"] == "link"
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert entry.data[CONF_HOST] == TEST_HOST
assert entry.data[CONF_TOKEN] == TEST_TOKEN
async def test_import_config(hass: HomeAssistant) -> None:
"""Test configuration import."""
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(TEST_HOST, TEST_TOKEN),
), patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"error, reason",
[
(Unavailable, "cannot_connect"),
(InvalidToken, "invalid_token"),
(Exception, "unknown"),
],
)
async def test_import_config_error(
hass: HomeAssistant, error: NanoleafException, reason: str
) -> None:
"""Test configuration import with errors in setup_finish."""
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.get_info",
side_effect=error,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "abort"
assert result["reason"] == reason
@pytest.mark.parametrize(
"source, type_in_discovery",
[
(config_entries.SOURCE_HOMEKIT, "_hap._tcp.local"),
(config_entries.SOURCE_ZEROCONF, "_nanoleafms._tcp.local"),
(config_entries.SOURCE_ZEROCONF, "_nanoleafapi._tcp.local"),
],
)
@pytest.mark.parametrize(
"nanoleaf_conf_file, remove_config",
[
({TEST_DEVICE_ID: {"token": TEST_TOKEN}}, True),
({TEST_HOST: {"token": TEST_TOKEN}}, True),
(
{
TEST_DEVICE_ID: {"token": TEST_TOKEN},
TEST_HOST: {"token": TEST_OTHER_TOKEN},
},
True,
),
(
{
TEST_DEVICE_ID: {"token": TEST_TOKEN},
TEST_OTHER_HOST: {"token": TEST_OTHER_TOKEN},
},
False,
),
(
{
TEST_OTHER_DEVICE_ID: {"token": TEST_OTHER_TOKEN},
TEST_HOST: {"token": TEST_TOKEN},
},
False,
),
],
)
async def test_import_discovery_integration(
hass: HomeAssistant,
source: str,
type_in_discovery: str,
nanoleaf_conf_file: dict[str, dict[str, str]],
remove_config: bool,
) -> None:
"""
Test discovery integration import.
Test with different discovery flow sources and corresponding types.
Test with different .nanoleaf_conf files with device_id (>= 2021.4), host (< 2021.4) and combination.
Test removing the .nanoleaf_conf file if it was the only device in the file.
Test updating the .nanoleaf_conf file if it was not the only device in the file.
"""
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value=dict(nanoleaf_conf_file),
), patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(TEST_HOST, TEST_TOKEN),
), patch(
"homeassistant.components.nanoleaf.config_flow.save_json",
return_value=None,
) as mock_save_json, patch(
"homeassistant.components.nanoleaf.config_flow.os.remove",
return_value=None,
) as mock_remove, patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": source},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{type_in_discovery}",
"type": type_in_discovery,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
if remove_config:
mock_save_json.assert_not_called()
mock_remove.assert_called_once()
else:
mock_save_json.assert_called_once()
mock_remove.assert_not_called()
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_ssdp_discovery(hass: HomeAssistant) -> None:
"""Test SSDP discovery."""
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
), patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf",
return_value=_mock_nanoleaf(TEST_HOST, TEST_TOKEN),
), patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"_host": TEST_HOST,
"nl-devicename": TEST_NAME,
"nl-deviceid": TEST_DEVICE_ID,
},
)
assert result["type"] == "form"
assert result["errors"] is None
assert result["step_id"] == "link"
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_NAME
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
|
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import sas
from util import winnforum_testcase
class RegistrationTestcase(unittest.TestCase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
pass
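# A hedged helper sketch (not part of the original suite): the multi-step
# registration tests below repeatedly split a loaded device into conditional
# parameters (pre-loaded via the admin API) and a reduced registration
# request. A hypothetical helper capturing that pattern could look like:
#
#   def _split_conditionals(device):
#       conditionals = {key: device[key] for key in (
#           'cbsdCategory', 'fccId', 'cbsdSerialNumber',
#           'airInterface', 'installationParam')}
#       for key in ('cbsdCategory', 'airInterface', 'installationParam'):
#           del device[key]
#       return conditionals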
@winnforum_testcase
def test_WINNF_FT_S_REG_1(self):
"""New Multi-Step registration for CBSD Cat A (No existing CBSD ID).
The response should be SUCCESS.
"""
# Pre-load conditional parameters
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
conditionals_a = {
'cbsdCategory': 'A',
'fccId': device_a['fccId'],
'cbsdSerialNumber': device_a['cbsdSerialNumber'],
'airInterface': device_a['airInterface'],
'installationParam': device_a['installationParam']}
conditionals = {'registrationData': [conditionals_a]}
self._sas_admin.PreloadRegistrationData(conditionals)
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
# Register the device
del device_a['cbsdCategory']
del device_a['airInterface']
del device_a['installationParam']
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_2(self):
"""New Multi-Step registration for CBSD Cat B (No existing CBSD ID).
The response should be SUCCESS.
"""
# Pre-load conditional parameters
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
conditionals_b = {
'cbsdCategory': 'B', 'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals = {'registrationData': [conditionals_b]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Register the device
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['installationParam']
request = {'registrationRequest': [device_b]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_3(self):
"""Array Multi-Step registration for CBSD Cat A&B (No existing CBSD ID)
The response should be SUCCESS.
"""
# Pre-load conditional parameters
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
conditionals_a = {
'cbsdCategory': device_a['cbsdCategory'],
'fccId': device_a['fccId'],
'cbsdSerialNumber': device_a['cbsdSerialNumber'],
'airInterface': device_a['airInterface'],
'installationParam': device_a['installationParam']}
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals_c = {
'cbsdCategory': device_c['cbsdCategory'],
'fccId': device_c['fccId'],
'cbsdSerialNumber': device_c['cbsdSerialNumber'],
'airInterface': device_c['airInterface'],
'installationParam': device_c['installationParam']}
conditionals = {'registrationData': [conditionals_a, conditionals_b, conditionals_c]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
# Remove conditionals from registration
del device_a['cbsdCategory']
del device_a['airInterface']
del device_a['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['installationParam']
del device_c['cbsdCategory']
del device_c['airInterface']
del device_c['installationParam']
# Register the devices
devices = [device_a, device_b, device_c]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
for x in range(0, 3):
self.assertTrue('cbsdId' in response['registrationResponse'][x])
self.assertFalse('measReportConfig' in response['registrationResponse'][x])
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_4(self):
"""Re-registration of Multi-step-registered CBSD (CBSD ID exists)
The response should be SUCCESS.
"""
# Pre-load conditional parameters
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals = {'registrationData': [conditionals_b]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Inject FCC ID
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
# Remove conditionals from registration
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['installationParam']
# Register the device
request = {'registrationRequest': [device_b]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
cbsdId = response['cbsdId']
del request, response
# Re-register the device
request = {'registrationRequest': [device_b]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_5(self):
"""Array Re-registration of Multi-step-registered CBSD (CBSD ID exists)
The response should be SUCCESS.
"""
# Pre-load conditional parameters
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
conditionals_a = {
'cbsdCategory': device_a['cbsdCategory'],
'fccId': device_a['fccId'],
'cbsdSerialNumber': device_a['cbsdSerialNumber'],
'airInterface': device_a['airInterface'],
'installationParam': device_a['installationParam']}
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals_c = {
'cbsdCategory': device_c['cbsdCategory'],
'fccId': device_c['fccId'],
'cbsdSerialNumber': device_c['cbsdSerialNumber'],
'airInterface': device_c['airInterface'],
'installationParam': device_c['installationParam']}
conditionals = {'registrationData': [conditionals_a, conditionals_b, conditionals_c]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Inject FCC IDs for first two devices
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
# Remove conditionals from registration
del device_a['cbsdCategory']
del device_a['airInterface']
del device_a['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['installationParam']
del device_c['cbsdCategory']
del device_c['airInterface']
del device_c['installationParam']
# Register the devices
devices = [device_a, device_b]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
for x in range(0, 2):
self.assertTrue('cbsdId' in response['registrationResponse'][x])
self.assertFalse('measReportConfig' in response['registrationResponse'][x])
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 0)
del request, response, devices
# Register the devices
devices = [device_a, device_b, device_c]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
for x in range(0, 3):
self.assertTrue('cbsdId' in response['registrationResponse'][x])
self.assertFalse('measReportConfig' in response['registrationResponse'][x])
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_6(self):
""" Single-Step registration (Cat A CBSD with no existing CBSD ID)
The response should be SUCCESS.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_a['measCapability'] = []
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_7(self):
""" Array Single-Step registration (Cat A CBSD with no existing CBSD ID)
The response should be SUCCESS.
"""
# Load the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_e = json.load(
open(os.path.join('testcases', 'testdata', 'device_e.json')))
# The measCapability contains no value for all array elements
device_a['measCapability'] = []
device_c['measCapability'] = []
device_e['measCapability'] = []
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
self._sas_admin.InjectFccId({'fccId': device_e['fccId']})
# Register the devices
devices = [device_a, device_c, device_e]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
for x in range(0, 3):
self.assertTrue('cbsdId' in response['registrationResponse'][x])
self.assertFalse('measReportConfig' in response['registrationResponse'][x])
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_8(self):
""" Re-registration of Single-step-registered CBSD (CBSD ID exists)
The response should be SUCCESS.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_a['measCapability'] = []
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 0)
del response
# Re-register the device
response = self._sas.Registration(request)['registrationResponse'][0]
self.assertTrue('cbsdId' in response)
self.assertFalse('measReportConfig' in response)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_9(self):
""" Array Re-registration of Single-step-registered CBSD (CBSD ID exists)
The response should be SUCCESS.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
# Make sure measCapability contains no value for all array elements
device_a['measCapability'] = []
device_b['measCapability'] = []
device_c['measCapability'] = []
# Register two devices
request = {'registrationRequest': [device_a, device_b]}
response = self._sas.Registration(request)
# Check registration response
self.assertEqual(len(response['registrationResponse']), 2)
for resp in response['registrationResponse']:
self.assertTrue('cbsdId' in resp)
self.assertEqual(resp['response']['responseCode'], 0)
del request, response
# Re-register two devices, register third device
devices = [device_a, device_b, device_c]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
self.assertEqual(len(response['registrationResponse']), len(devices))
for resp in response['registrationResponse']:
self.assertTrue('cbsdId' in resp)
self.assertFalse('measReportConfig' in resp)
self.assertEqual(resp['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_REG_10(self):
"""CBSD registration request with missing required parameter.
The required parameter 'userId' is missing in a registration request,
the response should be FAIL.
"""
# Register the device, make sure at least one required parameter is missing
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
del device_a['userId']
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertFalse('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 102)
@winnforum_testcase
def test_WINNF_FT_S_REG_11(self):
"""Missing Required parameters in Array request (responseCode 102)
The response should be MISSING_PARAM 102.
"""
# Load devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_d = json.load(
open(os.path.join('testcases', 'testdata', 'device_d.json')))
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
self._sas_admin.InjectFccId({'fccId': device_d['fccId']})
# Pre-load conditionals
conditionals_a = {
'cbsdCategory': device_a['cbsdCategory'],
'fccId': device_a['fccId'],
'cbsdSerialNumber': device_a['cbsdSerialNumber'],
'airInterface': device_a['airInterface'],
'installationParam': device_a['installationParam']}
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals_c = {
'cbsdCategory': device_c['cbsdCategory'],
'fccId': device_c['fccId'],
'cbsdSerialNumber': device_c['cbsdSerialNumber'],
'airInterface': device_c['airInterface'],
'installationParam': device_c['installationParam']}
conditionals_d = {
'cbsdCategory': device_d['cbsdCategory'],
'fccId': device_d['fccId'],
'cbsdSerialNumber': device_d['cbsdSerialNumber'],
'airInterface': device_d['airInterface'],
'installationParam': device_d['installationParam']}
conditionals = {'registrationData': [
conditionals_a, conditionals_b, conditionals_c, conditionals_d]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Device 2 missing cbsdSerialNumber
del device_b['cbsdSerialNumber']
# Device 3 missing fccId
del device_c['fccId']
# Device 4 missing userId
del device_d['userId']
# Register devices
devices = [device_a, device_b, device_c, device_d]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
self.assertTrue('cbsdId' in response['registrationResponse'][0])
self.assertEqual(response['registrationResponse'][0]['response']['responseCode'], 0)
for x in range(0, 4):
self.assertFalse('measReportConfig' in response['registrationResponse'][x])
for x in range(1, 4):
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 102)
@winnforum_testcase
def test_WINNF_FT_S_REG_12(self):
"""Pending registration for Cat A CBSD (responseCode 200)
The response should be FAILURE 200.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
# Make sure one conditional parameter is missing
del device_a['installationParam']['heightType']
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertFalse('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 200)
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertFalse('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 200)
@winnforum_testcase
def test_WINNF_FT_S_REG_13(self):
"""Pending registration for Cat B CBSD (responseCode 200)
The response should be FAILURE 200.
"""
# Register the device
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
self.assertEqual(device_b['cbsdCategory'], 'B')
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
# Make sure one conditional parameter is missing
del device_b['installationParam']['antennaDowntilt']
request = {'registrationRequest': [device_b]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertFalse('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 200)
@winnforum_testcase
def test_WINNF_FT_S_REG_14(self):
"""Pending registration in Array request (responseCode 200)
The response should be FAILURE.
"""
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_d = json.load(
open(os.path.join('testcases', 'testdata', 'device_d.json')))
# Device #1 is Category A
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device #2 is Category A with one conditional parameter missing
self.assertEqual(device_c['cbsdCategory'], 'A')
del device_c['installationParam']['indoorDeployment']
# Device #3 is Category B
self.assertEqual(device_b['cbsdCategory'], 'B')
# Device #4 is Category B with one conditional missing and conditionals pre-loaded
self.assertEqual(device_d['cbsdCategory'], 'B')
conditionals_d = {
'cbsdCategory': device_d['cbsdCategory'],
'fccId': device_d['fccId'],
'cbsdSerialNumber': device_d['cbsdSerialNumber'],
'airInterface': device_d['airInterface'],
'installationParam': device_d['installationParam']}
del conditionals_d['installationParam']['antennaBeamwidth']
conditionals = {'registrationData': [conditionals_d]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Inject FCC ID's
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
self._sas_admin.InjectFccId({'fccId': device_d['fccId']})
device_a['measCapability'] = []
device_c['measCapability'] = []
device_b['measCapability'] = []
device_d['measCapability'] = []
# Register devices
devices = [device_a, device_c, device_b, device_d]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
self.assertTrue('cbsdId' in response['registrationResponse'][0])
for resp in response['registrationResponse']:
self.assertFalse('measReportConfig' in resp)
self.assertEqual(response['registrationResponse'][0]['response']['responseCode'], 0)
for resp in response['registrationResponse'][1:]:
self.assertEqual(resp['response']['responseCode'], 200)
@winnforum_testcase
def test_WINNF_FT_S_REG_15(self):
"""Invalid parameters in Array request (responseCode 103)
The response should be SUCCESS for the first device,
FAIL for the second, third, fourth, fifth, and sixth devices.
"""
# Load the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_e = json.load(
open(os.path.join('testcases', 'testdata', 'device_e.json')))
device_f = json.load(
open(os.path.join('testcases', 'testdata', 'device_f.json')))
device_g = json.load(
open(os.path.join('testcases', 'testdata', 'device_g.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
devices = [device_a, device_c, device_e, device_f, device_g, device_b]
for device in devices:
      # measCapability is empty for all devices
device['measCapability'] = []
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device['fccId']})
# Device 1 Cat A
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device 2 Cat A invalid cbsdSerialNumber - above max length of 64 octets
self.assertEqual(device_c['cbsdCategory'], 'A')
device_c['cbsdSerialNumber'] = 'a' * 65
# Device 3 Cat A invalid fccId - above max length of 19 chars
self.assertEqual(device_e['cbsdCategory'], 'A')
device_e['fccId'] = 'a' * 20
# Device 4 Cat A invalid userId - invalid char (RFC-7542 Section 2.2)
self.assertEqual(device_f['cbsdCategory'], 'A')
device_f['userId'] = '@'
    # Device 5 Cat A invalid latitude - out of range
self.assertEqual(device_g['cbsdCategory'], 'A')
device_g['installationParam']['latitude'] = 91
    # Device 6 Cat B invalid eirpCapability (exceeds the Cat B limit)
self.assertEqual(device_b['cbsdCategory'], 'B')
device_b['installationParam']['eirpCapability'] = 48
# Pre-load conditionals for Device 6
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam']}
conditionals = {'registrationData': [conditionals_b]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Register devices
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
self.assertTrue('cbsdId' in response['registrationResponse'][0])
self.assertEqual(response['registrationResponse'][0]['response']['responseCode'], 0)
for resp in response['registrationResponse']:
self.assertFalse('measReportConfig' in resp)
for resp in response['registrationResponse'][1:]:
self.assertEqual(resp['response']['responseCode'], 103)
@winnforum_testcase
def test_WINNF_FT_S_REG_16(self):
"""Invalid Conditional parameters in Array request (responseCode 103)
The response should be SUCCESS for the first CBSD,
FAILURE 103 for the second and third CBSDs.
"""
# Load devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_e = json.load(
open(os.path.join('testcases', 'testdata', 'device_e.json')))
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
self._sas_admin.InjectFccId({'fccId': device_e['fccId']})
# Device 1 Cat A all valid conditionals
self.assertEqual(device_a['cbsdCategory'], 'A')
conditionals_a = {
'cbsdCategory': device_a['cbsdCategory'],
'fccId': device_a['fccId'],
'cbsdSerialNumber': device_a['cbsdSerialNumber'],
'airInterface': device_a['airInterface'],
'installationParam': device_a['installationParam']}
    # Device 2 Cat A with out-of-range antennaAzimuth
self.assertEqual(device_c['cbsdCategory'], 'A')
conditionals_c = {
'cbsdCategory': device_c['cbsdCategory'],
'fccId': device_c['fccId'],
'cbsdSerialNumber': device_c['cbsdSerialNumber'],
'airInterface': device_c['airInterface'],
'installationParam': device_c['installationParam']}
conditionals_c['installationParam']['antennaAzimuth'] = -1
    # Device 3 Cat A with latitude of the wrong type (string) and out of range
self.assertEqual(device_e['cbsdCategory'], 'A')
conditionals_e = {
'cbsdCategory': device_e['cbsdCategory'],
'fccId': device_e['fccId'],
'cbsdSerialNumber': device_e['cbsdSerialNumber'],
'airInterface': device_e['airInterface'],
'installationParam': device_e['installationParam']}
conditionals_e['installationParam']['latitude'] = '91'
    conditionals = {'registrationData': [conditionals_a, conditionals_c, conditionals_e]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Remove conditionals from registration
devices = [device_a, device_c, device_e]
for device in devices:
del device['cbsdCategory']
del device['airInterface']
del device['installationParam']
# Register the devices
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check registration response
self.assertTrue('cbsdId' in response['registrationResponse'][0])
for resp in response['registrationResponse']:
self.assertFalse('measReportConfig' in resp)
self.assertEqual(response['registrationResponse'][0]['response']['responseCode'], 0)
for resp in response['registrationResponse'][1:]:
self.assertEqual(resp['response']['responseCode'], 103)
@winnforum_testcase
def test_WINNF_FT_S_REG_17(self):
"""Blacklisted CBSD (responseCode 101)
The response should be FAILURE 101.
"""
# Register device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
# Register the device
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertTrue('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Blacklist the device
self._sas_admin.BlacklistByFccId({'fccId':device_a['fccId']})
# Re-register the device
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertFalse('cbsdId' in response)
self.assertEqual(response['response']['responseCode'], 101)
@winnforum_testcase
def test_WINNF_FT_S_REG_18(self):
"""Blacklisted CBSD in Array request (responseCode 101)
The response should be FAILURE 101.
"""
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
devices = [device_a, device_b, device_c]
for device in devices:
self._sas_admin.InjectFccId({'fccId': device['fccId']})
device['measCapability'] = []
request = {'registrationRequest': devices}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
for resp in response:
self.assertTrue('cbsdId' in resp)
self.assertEqual(resp['response']['responseCode'], 0)
del request, response
# Blacklist the third device
self._sas_admin.BlacklistByFccId({'fccId':device_c['fccId']})
# Re-register the devices
request = {'registrationRequest': devices}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
self.assertEqual(len(response), len(devices))
for response_num, resp in enumerate(response[:2]):
self.assertEqual(resp['response']['responseCode'], 0)
self.assertTrue('cbsdId' in resp)
self.assertFalse('measReportConfig' in resp)
self.assertFalse('measReportConfig' in response[2])
self.assertEqual(response[2]['response']['responseCode'], 101)
@winnforum_testcase
  def test_WINNF_FT_S_REG_19(self):
"""Unsupported SAS protocol version (responseCode 100 or HTTP status 404)
The response should be FAILURE.
"""
# Save sas version
version = self._sas._sas_version
# Use higher than supported version
self._sas._sas_version = 'v2.0'
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
try:
# Register
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 100)
self.assertFalse('cbsdId' in response)
except AssertionError as e:
# Allow HTTP status 404
self.assertEqual(e.args[0], 404)
finally:
# Put sas version back
self._sas._sas_version = version
@winnforum_testcase
def test_WINNF_FT_S_REG_20(self):
"""Unsupported SAS protocol version in Array request (responseCode 100)
The response should be FAILURE 100.
"""
# Save sas version
version = self._sas._sas_version
# Use higher than supported version
self._sas._sas_version = 'v2.0'
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
devices = [device_a, device_b, device_c]
for device in devices:
self._sas_admin.InjectFccId({'fccId': device['fccId']})
request = {'registrationRequest': devices}
try:
response = self._sas.Registration(request)
# Check response
for resp in response['registrationResponse']:
self.assertEqual(resp['response']['responseCode'], 100)
self.assertFalse('cbsdId' in resp)
except AssertionError as e:
# Allow HTTP status 404
self.assertEqual(e.args[0], 404)
finally:
# Put sas version back
self._sas._sas_version = version
@winnforum_testcase
def test_WINNF_FT_S_REG_21(self):
"""Group Error (responseCode 201)
The response should be FAILURE
"""
# Load device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
# Create invalid group - only 'INTERFERENCE_COORDINATION' allowed
device_a['groupingParam'] = [
{'groupType': 'FAKE_GROUP_TYPE',
'groupId': '1234'}
]
# Inject FCC ID
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
# Register device
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check response
    self.assertIn(response['response']['responseCode'], (103, 201))
self.assertFalse('cbsdId' in response)
@winnforum_testcase
def test_WINNF_FT_S_REG_22(self):
"""Group Error in Array request (responseCode 201)
The response should be SUCCESS for the first two devices,
FAILURE for the third device.
"""
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
# Device #3 invalid group - only 'INTERFERENCE_COORDINATION' allowed
device_c['groupingParam'] = [
{'groupType': 'FAKE_GROUP_TYPE',
'groupId': '1234'}
]
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
# Register devices
devices = [device_a, device_b, device_c]
request = {'registrationRequest': devices}
response = self._sas.Registration(request)
# Check response
for resp in response['registrationResponse'][:2]:
self.assertEqual(resp['response']['responseCode'], 0)
self.assertTrue('cbsdId' in resp)
    self.assertIn(response['registrationResponse'][2]['response']['responseCode'], (103, 201))
@winnforum_testcase
def test_WINNF_FT_S_REG_23(self):
"""CBSD Cat A attempts to register with HAAT >6m
The response should be FAILURE 103
Note: WINNF-15-S-0061-CBRS Architecture Test and Certification
Specification - SAS Operation v0.6.4 has 202, but the TS
    has removed 202, so this should be 103.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
device_a['installationParam']['latitude'] = 38.882162
device_a['installationParam']['longitude'] = -77.113755
device_a['installationParam']['height'] = 8
device_a['installationParam']['heightType'] = 'AGL'
device_a['installationParam']['indoorDeployment'] = False
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 103)
self.assertFalse('cbsdId' in response)
@winnforum_testcase
  def test_WINNF_FT_S_REG_24(self):
"""CBSD Cat A attempts to register with eirpCapability > 30 dBm/10MHz
The response should be FAILURE 103.
Note: WINNF-15-S-0061-CBRS Architecture Test and Certification
Specification - SAS Operation v0.6.4 has 202, but the TS
    has removed 202, so this should be 103.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
device_a['installationParam']['eirpCapability'] = 31
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 103)
self.assertFalse('cbsdId' in response)
@winnforum_testcase
  def test_WINNF_FT_S_REG_25(self):
"""CBSD Cat B attempts to register as Indoors deployment
The response should be FAILURE 103.
Note: WINNF-15-S-0061-CBRS Architecture Test and Certification
Specification - SAS Operation v0.6.4 has 202, but the TS
    has removed 202, so this should be 103.
"""
# Register the device
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
device_b['installationParam']['indoorDeployment'] = True
request = {'registrationRequest': [device_b]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 103)
self.assertFalse('cbsdId' in response)
@winnforum_testcase
def test_WINNF_FT_S_REG_26(self):
"""Category Error in Array request (responseCode 103)
The response should be SUCCESS for the first CBSD,
CATEGORY_ERROR for the second, third, and fourth CBSDs.
Note: WINNF-15-S-0061-CBRS Architecture Test and Certification
Specification - SAS Operation v0.6.4 has 202, but the TS
    has removed 202, so this should be 103.
"""
device_1 = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_2 = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
device_3 = json.load(
open(os.path.join('testcases', 'testdata', 'device_e.json')))
device_4 = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
    # Device 1 category A
self.assertEqual(device_1['cbsdCategory'], 'A')
    # Device 2 category A, outdoor with antenna height above the 6 m Cat A limit
device_2['installationParam']['latitude'] = 38.882162
device_2['installationParam']['longitude'] = -77.113755
device_2['installationParam']['height'] = 8
device_2['installationParam']['heightType'] = 'AGL'
device_2['installationParam']['indoorDeployment'] = False
self.assertEqual(device_2['cbsdCategory'], 'A')
# Device 3 category A eirpCapability > 30 dBm/10MHz
device_3['installationParam']['eirpCapability'] = 31
self.assertEqual(device_3['cbsdCategory'], 'A')
# Device 4 category B indoorDeployment true
device_4['installationParam']['indoorDeployment'] = True
self.assertEqual(device_4['cbsdCategory'], 'B')
# Pre-load conditionals for Device 4
conditionals_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam']}
conditionals = {'registrationData': [conditionals_4]}
self._sas_admin.PreloadRegistrationData(conditionals)
# Inject FCC IDs
self._sas_admin.InjectFccId({'fccId': device_1['fccId']})
self._sas_admin.InjectFccId({'fccId': device_2['fccId']})
self._sas_admin.InjectFccId({'fccId': device_3['fccId']})
self._sas_admin.InjectFccId({'fccId': device_4['fccId']})
devices = [device_1, device_2, device_3, device_4]
request = {'registrationRequest': devices}
# Register devices
response = self._sas.Registration(request)
# First device success
self.assertTrue('cbsdId' in response['registrationResponse'][0])
self.assertFalse('measReportConfig' in response['registrationResponse'][0])
self.assertEqual(response['registrationResponse'][0]['response']['responseCode'], 0)
# Second, third, fourth devices failure 103
    for x in range(1, 4):
self.assertEqual(response['registrationResponse'][x]['response']['responseCode'], 103)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from nova import exception
from nova.openstack.common.gettextutils import _
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
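# Stub session object: _call_method ignores its arguments and returns the canned result.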
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
def test_get_datastore_ref_and_name(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore())
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects))
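        # The single fake datastore reports 1 TiB capacity and 500 GiB of free space.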
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500 * 1024)
def test_get_datastore_ref_and_name_with_regex(self):
# Test with a regex that matches with a datastore
        datastore_valid_regex = re.compile(r"^openstack.*\d$")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertEquals("openstack-ds0", result[1])
def test_get_datastore_ref_and_name_with_list(self):
# Test with a regex containing whitelist of datastores
datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("openstack-ds1"))
fake_objects.add_object(fake.Datastore("openstack-ds2"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertNotEquals("openstack-ds1", result[1])
def test_get_datastore_ref_and_name_with_regex_error(self):
# Test with a regex that has no match
# Checks if code raises DatastoreNotFound with a specific message
datastore_invalid_regex = re.compile("unknown-ds")
exp_message = (_("Datastore regex %s did not match any datastores")
% datastore_invalid_regex.pattern)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
        # assertRaisesRegexp would have been a good choice instead of the
        # try/except block, but it's available only from Py 2.7.
try:
vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None,
datastore_invalid_regex)
except exception.DatastoreNotFound as e:
self.assertEquals(exp_message, e.args[0])
else:
self.fail("DatastoreNotFound Exception was not raised with "
"message: %s" % exp_message)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
def test_get_host_ref_from_id(self):
fake_host_name = "ha-host"
fake_host_sys = fake.HostSystem(fake_host_name)
fake_host_id = fake_host_sys.obj.value
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_host_sys)
ref = vm_util.get_host_ref_from_id(
fake_session(fake_objects), fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEquals(fake_host_name, host_name)
def test_get_host_name_for_vm(self):
fake_host = fake.HostSystem()
fake_host_id = fake_host.obj.value
fake_vm = fake.VirtualMachine(name='vm-123',
runtime_host=fake_host.obj)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_vm)
vm_ref = vm_util.get_vm_ref_from_name(
fake_session(fake_objects), 'vm-123')
self.assertIsNotNone(vm_ref)
host_id = vm_util.get_host_id_from_vm_ref(
fake_session(fake_objects), vm_ref)
self.assertEqual(fake_host_id, host_id)
def test_property_from_property_set(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
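        # The 'good' results wrap every value in a MoRef; the 'bad' results use bare strings for 'foo'.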
good_objects = fake.FakeRetrieveResult()
results_good = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val=MoRef(value='bar1')),
DynamicProperty(
name='runtime.host', val=MoRef(value='host-123')),
DynamicProperty(name='foo', val=MoRef(value='bar2')),
]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_good:
good_objects.add_object(result)
bad_objects = fake.FakeRetrieveResult()
results_bad = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val='bar1'),
DynamicProperty(name='foo', val='bar2'), ]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_bad:
bad_objects.add_object(result)
prop = vm_util.property_from_property_set(
'runtime.host', good_objects)
self.assertIsNotNone(prop)
value = prop.val.value
self.assertEqual('host-123', value)
prop2 = vm_util.property_from_property_set(
'runtime.host', bad_objects)
self.assertIsNone(prop2)
prop3 = vm_util.property_from_property_set('foo', good_objects)
self.assertIsNotNone(prop3)
val3 = prop3.val.value
self.assertEqual('bar1', val3)
prop4 = vm_util.property_from_property_set('foo', bad_objects)
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
def test_get_datastore_ref_and_name_inaccessible_ds(self):
data_store = fake.Datastore()
data_store.set("summary.accessible", False)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(fake_objects))
def test_get_cdrom_attach_config_spec(self):
result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
fake.Datastore(),
"/tmp/foo.iso",
0)
expected = """{
'deviceChange': [
{
'device': {
'connectable': {
'allowGuestControl': False,
'startConnected': True,
'connected': True,
'obj_name': 'ns0: VirtualDeviceConnectInfo'
},
'backing': {
'datastore': {
"summary.type": "VMFS",
"summary.freeSpace": 536870912000,
"summary.capacity": 1099511627776,
"summary.accessible":true,
"summary.name": "fake-ds"
},
'fileName': '/tmp/foo.iso',
'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
},
'controllerKey': 200,
'unitNumber': 0,
'key': -1,
'obj_name': 'ns0: VirtualCdrom'
},
'operation': 'add',
'obj_name': 'ns0: VirtualDeviceConfigSpec'
}
],
'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="lsiLogicsas")
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def test_get_vmdk_path_and_adapter_type(self):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
filename = '[test_datastore] test_file.vmdk'
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[2]
self.assertEqual('lsiLogicsas', adapter_type)
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS & LSILogic
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def _test_get_vnc_config_spec(self, port, password):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port, password)
return result
def test_get_vnc_config_spec(self):
result = self._test_get_vnc_config_spec(7, None)
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vnc_config_spec_password(self):
result = self._test_get_vnc_config_spec(7, 'password')
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'},
{'value':'password',
'key':'RemoteDisplay.vnc.password',
'obj_name':'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
"""Reads an HTTP response from `stream` and runs callback with its
headers and body."""
chunks = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
self.headers = headers
def data_received(self, chunk):
chunks.append(chunk)
def finish(self):
callback((self.headers, b''.join(chunks)))
conn = HTTP1Connection(stream, True)
conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
if self.request.protocol != self.expected_protocol:
raise Exception("unexpected protocol")
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
skipIfOldSSL = unittest.skipIf(
getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
"old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
class SSLTestMixin(object):
def get_ssl_options(self):
return dict(ssl_version=self.get_ssl_version(), # type: ignore
**AsyncHTTPSTestCase.get_ssl_options())
def get_ssl_version(self):
raise NotImplementedError()
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A' * 5000)
self.assertEqual(response.body, b"Got 5000 bytes in POST")
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
with ExpectLog(gen_log, 'Uncaught exception', required=False):
self.http_client.fetch(
self.get_url("/").replace('https:', 'http:'),
self.stop,
request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
def test_error_logging(self):
# No stack traces are logged for SSL errors.
with ExpectLog(gen_log, 'SSL Error') as expect_log:
self.http_client.fetch(
self.get_url("/").replace("https:", "http:"),
self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertFalse(expect_log.logged_stack)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
def get_ssl_options(self):
context = ssl_options_to_context(
AsyncHTTPSTestCase.get_ssl_options(self))
assert isinstance(context, ssl.SSLContext)
return context
class BadSSLOptionsTest(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(KeyError, HTTPServer, application, ssl_options={
"keyfile": "/__missing__.crt",
})
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, 'test.crt')
existing_key = os.path.join(module_dir, 'test.key')
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": "/__mising__.crt",
})
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key"
})
# This actually works because both files exist
HTTPServer(application, ssl_options={
"certfile": existing_certificate,
"keyfile": existing_key,
})
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body, newline=b"\r\n"):
with closing(IOStream(socket.socket())) as stream:
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
stream.write(
newline.join(headers +
[utf8("Content-Length: %d" % len(body))]) +
newline + newline + body)
read_stream_body(stream, self.stop)
headers, body = self.wait()
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9",
],
b"\r\n".join([
b"Content-Disposition: form-data; name=argument",
b"",
u"\u00e1".encode("utf-8"),
b"--1234567890",
u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
b"",
u"\u00fa".encode("utf-8"),
b"--1234567890--",
b"",
]))
data = json_decode(response)
self.assertEqual(u"\u00e9", data["header"])
self.assertEqual(u"\u00e1", data["argument"])
self.assertEqual(u"\u00f3", data["filename"])
self.assertEqual(u"\u00fa", data["filebody"])
def test_newlines(self):
# We support both CRLF and bare LF as line separators.
for newline in (b"\r\n", b"\n"):
response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
newline=newline)
self.assertEqual(response, b'Hello world')
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
stream = IOStream(socket.socket(), io_loop=self.io_loop)
stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
b"Content-Length: 1024",
b"Expect: 100-continue",
b"Connection: close",
b"\r\n"]), callback=self.stop)
self.wait()
stream.read_until(b"\r\n\r\n", self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
stream.write(b"a" * 1024)
stream.read_until(b"\r\n", self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
stream.read_until(b"\r\n\r\n", self.stop)
header_data = self.wait()
headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Got 1024 bytes in POST")
stream.close()
class EchoHandler(RequestHandler):
def get(self):
self.write(recursive_unicode(self.request.arguments))
def post(self):
self.write(recursive_unicode(self.request.arguments))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
fields = [
('method', str),
('uri', str),
('version', str),
('remote_ip', str),
('protocol', str),
('host', str),
('path', str),
('query', str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type('header_key', list(self.request.headers.keys())[0], str)
self.check_type('header_value', list(self.request.headers.values())[0], str)
self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
# secure cookies
self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
def post(self):
self.check_type('body', self.request.body, bytes)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
def get_app(self):
return Application([("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
("//doubleslash", EchoHandler),
])
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u"\u00e9"]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u"", u""]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
def test_double_slash(self):
# urlparse.urlsplit (which tornado.httpserver used to use
# incorrectly) would parse paths beginning with "//" as
# protocol-relative urls.
response = self.fetch("//doubleslash")
self.assertEqual(200, response.code)
self.assertEqual(json_decode(response.body), {})
def test_malformed_body(self):
# parse_qs is pretty forgiving, but it will fail on python 3
# if the data is not utf8. On python 2 parse_qs will work,
# but then the recursive_unicode call in EchoHandler will
# fail.
if str is bytes:
return
with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
response = self.fetch(
'/echo', method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=b'\xe9')
self.assertEqual(200, response.code)
self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/echo', EchoHandler),
])
def setUp(self):
super(HTTPServerRawTest, self).setUp()
self.stream = IOStream(socket.socket())
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
def tearDown(self):
self.stream.close()
super(HTTPServerRawTest, self).tearDown()
def test_empty_request(self):
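        # Close the connection without sending a request; the server should handle it quietly.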
self.stream.close()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
def test_malformed_first_line(self):
with ExpectLog(gen_log, '.*Malformed HTTP request line'):
self.stream.write(b'asdf\r\n\r\n')
# TODO: need an async version of ExpectLog so we don't need
# hard-coded timeouts here.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
self.stop)
self.wait()
def test_malformed_headers(self):
with ExpectLog(gen_log, '.*Malformed HTTP headers'):
self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
self.stop)
self.wait()
def test_chunked_request_body(self):
# Chunked requests are not widely supported and we don't have a way
# to generate them in AsyncHTTPClient, but HTTPServer will read them.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
headers, response = self.wait()
self.assertEqual(json_decode(response), {u'foo': [u'bar']})
def test_chunked_request_uppercase(self):
# As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is
# case-insensitive.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: Chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
headers, response = self.wait()
self.assertEqual(json_decode(response), {u'foo': [u'bar']})
def test_invalid_content_length(self):
with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'):
self.stream.write(b"""\
POST /echo HTTP/1.1
Content-Length: foo
bar
""".replace(b"\n", b"\r\n"))
self.stream.read_until_close(self.stop)
self.wait()
class XHeaderTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(remote_ip=self.request.remote_ip,
remote_protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(xheaders=True)
def test_ip_headers(self):
self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
"4.4.4.4")
valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
"4.4.4.4")
valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars)["remote_ip"],
"127.0.0.1")
invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
"127.0.0.1")
invalid_host = {"X-Real-IP": "www.google.com"}
self.assertEqual(
self.fetch_json("/", headers=invalid_host)["remote_ip"],
"127.0.0.1")
def test_scheme_headers(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")
https_scheme = {"X-Scheme": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_scheme)["remote_protocol"],
"https")
https_forwarded = {"X-Forwarded-Proto": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
"https")
bad_forwarded = {"X-Forwarded-Proto": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
"http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
def get_app(self):
return Application([('/', XHeaderTest.Handler)])
def get_httpserver_options(self):
output = super(SSLXHeaderTest, self).get_httpserver_options()
output['xheaders'] = True
return output
def test_request_without_xprotocol(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")
http_scheme = {"X-Scheme": "http"}
self.assertEqual(
self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http")
bad_scheme = {"X-Scheme": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https")
class ManualProtocolTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(protocol='https')
def test_manual_protocol(self):
self.assertEqual(self.fetch_json('/')['protocol'], 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
"unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
"""HTTPServers can listen on Unix sockets too.
Why would you want to do this? Nginx can proxy to backends listening
on unix sockets, for one thing (and managing a namespace for unix
sockets can be easier than managing a bunch of TCP port numbers).
Unfortunately, there's no way to specify a unix socket in a url for
an HTTP client, so we have to test this by hand.
"""
def setUp(self):
super(UnixSocketTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.sockfile = os.path.join(self.tmpdir, "test.sock")
sock = netutil.bind_unix_socket(self.sockfile)
app = Application([("/hello", HelloWorldRequestHandler)])
self.server = HTTPServer(app, io_loop=self.io_loop)
self.server.add_socket(sock)
self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
self.stream.connect(self.sockfile, self.stop)
self.wait()
def tearDown(self):
self.stream.close()
self.server.stop()
shutil.rmtree(self.tmpdir)
super(UnixSocketTest, self).tearDown()
def test_unix_socket(self):
self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
self.stream.read_until(b"\r\n", self.stop)
response = self.wait()
self.assertEqual(response, b"HTTP/1.1 200 OK\r\n")
self.stream.read_until(b"\r\n\r\n", self.stop)
headers = HTTPHeaders.parse(self.wait().decode('latin1'))
self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Hello world")
def test_unix_socket_bad_request(self):
# Unix sockets don't have remote addresses so they just return an
# empty string.
with ExpectLog(gen_log, "Malformed HTTP message from"):
self.stream.write(b"garbage\r\n\r\n")
self.stream.read_until_close(self.stop)
response = self.wait()
self.assertEqual(response, b"")
class KeepAliveTest(AsyncHTTPTestCase):
"""Tests various scenarios for HTTP 1.1 keep-alive support.
These tests don't use AsyncHTTPClient because we want to control
connection reuse and closing.
"""
def get_app(self):
class HelloHandler(RequestHandler):
def get(self):
self.finish('Hello world')
def post(self):
self.finish('Hello world')
class LargeHandler(RequestHandler):
def get(self):
# 512KB should be bigger than the socket buffers so it will
# be written out in chunks.
self.write(''.join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
@asynchronous
def get(self):
self.flush()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to mimic
# some errors seen in the wild.
self.finish('closed')
return Application([('/', HelloHandler),
('/large', LargeHandler),
('/finish_on_close', FinishOnCloseHandler)])
def setUp(self):
super(KeepAliveTest, self).setUp()
self.http_version = b'HTTP/1.1'
def tearDown(self):
# We just closed the client side of the socket; let the IOLoop run
# once to make sure the server side got the message.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
if hasattr(self, 'stream'):
self.stream.close()
super(KeepAliveTest, self).tearDown()
# The next few methods are a crude manual http client
def connect(self):
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
def read_headers(self):
self.stream.read_until(b'\r\n', self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line)
self.stream.read_until(b'\r\n\r\n', self.stop)
header_bytes = self.wait()
headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
return headers
def read_response(self):
self.headers = self.read_headers()
self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
body = self.wait()
self.assertEqual(b'Hello world', body)
def close(self):
self.stream.close()
del self.stream
def test_two_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.close()
def test_request_close(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.close()
# keepalive is supported for http 1.0 too, but it's opt-in
def test_http10(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertTrue('Connection' not in self.headers)
self.close()
def test_http10_keepalive(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_http10_keepalive_extra_crlf(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_pipelined_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
self.read_response()
self.read_response()
self.close()
def test_pipelined_cancel(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
# only read once
self.read_response()
self.close()
def test_cancel_during_download(self):
self.connect()
self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
self.read_headers()
self.stream.read_bytes(1024, self.stop)
self.wait()
self.close()
def test_finish_while_closed(self):
self.connect()
self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
self.read_headers()
self.close()
def test_keepalive_chunked(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n0\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
class GzipBaseTest(object):
def get_app(self):
return Application([('/', EchoHandler)])
def post_gzip(self, body):
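        # Gzip-compress the body and POST it with a Content-Encoding: gzip header.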
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
gzip_file.write(utf8(body))
gzip_file.close()
compressed_body = bytesio.getvalue()
return self.fetch('/', method='POST', body=compressed_body,
headers={'Content-Encoding': 'gzip'})
def test_uncompressed(self):
response = self.fetch('/', method='POST', body='foo=bar')
self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
def get_httpserver_options(self):
return dict(decompress_request=True)
def test_gzip(self):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
def test_gzip_unsupported(self):
# Gzip support is opt-in; without it the server fails to parse
# the body (but parsing form bodies is currently just a log message,
# not a fatal error).
with ExpectLog(gen_log, "Unsupported Content-Encoding"):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
# 50 characters long, and repetitive so it can be compressed.
BODY = b'01234567890123456789012345678901234567890123456789'
CHUNK_SIZE = 16
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def get_httpserver_options(self):
return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
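    # Delegate that records the length of every body chunk it receives and echoes the list back as JSON.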
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def headers_received(self, start_line, headers):
self.chunk_lengths = []
def data_received(self, chunk):
self.chunk_lengths.append(len(chunk))
def finish(self):
response_body = utf8(json_encode(self.chunk_lengths))
self.connection.write_headers(
ResponseStartLine('HTTP/1.1', 200, 'OK'),
HTTPHeaders({'Content-Length': str(len(response_body))}))
self.connection.write(response_body)
self.connection.finish()
def get_app(self):
class App(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return StreamingChunkSizeTest.MessageDelegate(request_conn)
return App()
def fetch_chunk_sizes(self, **kwargs):
response = self.fetch('/', method='POST', **kwargs)
response.rethrow()
chunks = json_decode(response.body)
self.assertEqual(len(self.BODY), sum(chunks))
for chunk_size in chunks:
self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
'oversized chunk: ' + str(chunks))
self.assertGreater(chunk_size, 0,
'empty chunk: ' + str(chunks))
return chunks
def compress(self, body):
bytesio = BytesIO()
gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
gzfile.write(body)
gzfile.close()
compressed = bytesio.getvalue()
if len(compressed) >= len(body):
raise Exception("body did not shrink when compressed")
return compressed
def test_regular_body(self):
chunks = self.fetch_chunk_sizes(body=self.BODY)
# Without compression we know exactly what to expect.
self.assertEqual([16, 16, 16, 2], chunks)
def test_compressed_body(self):
self.fetch_chunk_sizes(body=self.compress(self.BODY),
headers={'Content-Encoding': 'gzip'})
# Compression creates irregular boundaries so the assertions
# in fetch_chunk_sizes are as specific as we can get.
def test_chunked_body(self):
def body_producer(write):
write(self.BODY[:20])
write(self.BODY[20:])
chunks = self.fetch_chunk_sizes(body_producer=body_producer)
# HTTP chunk boundaries translate to application-visible breaks
self.assertEqual([16, 4, 16, 14], chunks)
def test_chunked_compressed(self):
compressed = self.compress(self.BODY)
self.assertGreater(len(compressed), 20)
def body_producer(write):
write(compressed[:20])
write(compressed[20:])
self.fetch_chunk_sizes(body_producer=body_producer,
headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(max_header_size=1024)
def test_small_headers(self):
response = self.fetch("/", headers={'X-Filler': 'a' * 100})
response.rethrow()
self.assertEqual(response.body, b"Hello world")
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read", required=False):
response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
# 431 is "Request Header Fields Too Large", defined in RFC
# 6585. However, many implementations just close the
# connection in this case, resulting in a 599.
self.assertIn(response.code, (431, 599))
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(idle_connection_timeout=0.1)
def setUp(self):
super(IdleTimeoutTest, self).setUp()
self.streams = []
def tearDown(self):
super(IdleTimeoutTest, self).tearDown()
for stream in self.streams:
stream.close()
def connect(self):
stream = IOStream(socket.socket())
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
self.streams.append(stream)
return stream
def test_unused_connection(self):
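        # A connection that never sends a request should be closed by the server once idle_connection_timeout expires.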
stream = self.connect()
stream.set_close_callback(self.stop)
self.wait()
def test_idle_after_use(self):
stream = self.connect()
stream.set_close_callback(lambda: self.stop("closed"))
# Use the connection twice to make sure keep-alives are working
for i in range(2):
stream.write(b"GET / HTTP/1.1\r\n\r\n")
stream.read_until(b"\r\n\r\n", self.stop)
self.wait()
stream.read_bytes(11, self.stop)
data = self.wait()
self.assertEqual(data, b"Hello world")
# Now let the timeout trigger and close the connection.
data = self.wait()
self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
def get_app(self):
class BufferedHandler(RequestHandler):
def put(self):
self.write(str(len(self.request.body)))
@stream_request_body
class StreamingHandler(RequestHandler):
def initialize(self):
self.bytes_read = 0
def prepare(self):
if 'expected_size' in self.request.arguments:
self.request.connection.set_max_body_size(
int(self.get_argument('expected_size')))
if 'body_timeout' in self.request.arguments:
self.request.connection.set_body_timeout(
float(self.get_argument('body_timeout')))
def data_received(self, data):
self.bytes_read += len(data)
def put(self):
self.write(str(self.bytes_read))
return Application([('/buffered', BufferedHandler),
('/streaming', StreamingHandler)])
def get_httpserver_options(self):
return dict(body_timeout=3600, max_body_size=4096)
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_small_body(self):
response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
def test_large_body_buffered(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_buffered_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/buffered', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_streaming_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/streaming', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body=b'a' * 10240)
self.assertEqual(response.body, b'10240')
def test_large_body_streaming_chunked_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.body, b'10240')
@gen_test
def test_timeout(self):
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream because AsyncHTTPClient won't let us read a
# response without finishing a body.
stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
b'Content-Length: 42\r\n\r\n')
with ExpectLog(gen_log, 'Timeout reading body'):
response = yield stream.read_until_close()
self.assertEqual(response, b'')
finally:
stream.close()
@gen_test
def test_body_size_override_reset(self):
# The max_body_size override is reset between requests.
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream so we can make sure it's all on one connection.
stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
stream.write(b'a' * 10240)
headers, response = yield gen.Task(read_stream_body, stream)
self.assertEqual(response, b'10240')
# Without the ?expected_size parameter, we get the old default value
stream.write(b'PUT /streaming HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
with ExpectLog(gen_log, '.*Content-Length too long'):
data = yield stream.read_until_close()
self.assertEqual(data, b'')
finally:
stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
def get_app(self):
# The old request_callback interface does not implement the
# delegate interface, and writes its response via request.write
# instead of request.connection.write_headers.
def handle_request(request):
self.http1 = request.version.startswith("HTTP/1.")
if not self.http1:
# This test will be skipped if we're using HTTP/2,
# so just close it out cleanly using the modern interface.
request.connection.write_headers(
ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
message = b"Hello world"
request.write(utf8("HTTP/1.1 200 OK\r\n"
"Content-Length: %d\r\n\r\n" % len(message)))
request.write(message)
request.finish()
return handle_request
def test_legacy_interface(self):
response = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(response.body, b"Hello world")
|
|
#!/usr/bin/env python
"""
chex
Indexes chess game states from one or more PGN files
(https://en.wikipedia.org/wiki/Portable_Game_Notation) with Spotify's annoy
(https://github.com/spotify/annoy) so the user can search for game states
similar to a game state they input as well as the games in which they're
found.
Requires https://pypi.python.org/pypi/python-chess,
https://github.com/spotify/annoy, and https://pypi.python.org/pypi/sqlitedict.
"""
import chess
import chess.pgn
import struct
import binascii
import argparse
import errno
import os
import sys
import time
import random
import atexit
import shutil
import copy
import tempfile
import logging
from math import sqrt
from annoy import AnnoyIndex
from sqlitedict import SqliteDict
_help_intro = """chex is a search engine for chess game states."""
def help_formatter(prog):
""" So formatter_class's max_help_position can be changed. """
return argparse.HelpFormatter(prog, max_help_position=40)
# For bitboard conversion
_offsets = {
'p' : 0,
'P' : 1,
'n' : 2,
'N' : 3,
'b' : 4,
'B' : 5,
'k' : 6,
'K' : 7,
'r' : 8,
'R' : 9,
'q' : 10,
'Q' : 11,
}
_reverse_offsets = { value : key for key, value in _offsets.items() }
_reverse_colors_offsets = {
'p' : 1,
'P' : 0,
'n' : 3,
'N' : 2,
'b' : 5,
'B' : 4,
'k' : 7,
'K' : 6,
'r' : 9,
'R' : 8,
'q' : 11,
'Q' : 10,
}
_bitboard_length = 768
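# Layout sketch (illustrative): the bitboard is 64 squares x 12 piece planes,
# so a piece on square s contributes a 1 at index 12*s + _offsets[symbol].
# For example, a white knight on g1 (square 6 in python-chess numbering)
# sets index 12*6 + _offsets['N'] = 75.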
def board_to_bitboard(board):
""" Converts chess module's board to bitboard game state representation.
        board: object of type chess.Board
Return value: binary vector of length _bitboard_length as Python list
"""
bitboard = [0 for _ in xrange(_bitboard_length)]
for i in xrange(64):
try:
bitboard[i*12 + _offsets[board.piece_at(i).symbol()]] = 1
except AttributeError:
pass
return bitboard
def bitboard_to_board(bitboard):
""" Converts bitboard to board.
TODO: unit test.
bitboard: iterable of _bitboard_length 1s and 0s
Return value: chess.Board representation of bitboard
"""
fen = [[] for _ in xrange(8)]
for i in xrange(8):
streak = 0
for j in xrange(8):
segment = 12*(8*i + j)
piece = None
for offset in xrange(12):
if bitboard[segment + offset]:
piece = _reverse_offsets[offset]
if piece is not None:
if streak: fen[i].append(str(streak))
fen[i].append(piece)
streak = 0
else:
streak += 1
if j == 7: fen[i].append(str(streak))
return chess.Board(
'/'.join([''.join(row) for row in fen][::-1]) + ' w KQkq - 0 1'
)
def bitboard_to_key(bitboard):
""" Converts bitboard to ASCII representation used as key in SQL database.
bitboard: bitboard representation of chess board
Return value: ASCII representation of bitboard
"""
to_unhexlify = '%x' % int(''.join(map(str, map(int, bitboard))), 2)
try:
return binascii.unhexlify(to_unhexlify)
except TypeError:
return binascii.unhexlify('0' + to_unhexlify)
def key_to_bitboard(key):
""" Converts ASCII representation of board to bitboard.
key: ASCII representation of bitboard
Return value: bitboard (binary list)
"""
unpadded = [
int(digit) for digit in bin(int(binascii.hexlify(key), 16))[2:]]
return [0 for _ in xrange(_bitboard_length - len(unpadded))] + unpadded
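# Round-trip sketch for the encoders above (illustrative; the conversions are
# lossless because the key is just the bitboard's bits packed into bytes):
#
#     board = chess.Board()  # standard starting position
#     bitboard = board_to_bitboard(board)
#     key = bitboard_to_key(bitboard)
#     assert key_to_bitboard(key) == bitboard
#     assert bitboard_to_board(bitboard).board_fen() == board.board_fen()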
def invert_board(board):
""" Computes bitboard of given position but with inverted colors. """
inversevector = [0 for _ in xrange(_bitboard_length)]
for i in xrange(64):
try:
inversevector[i * 12
+ _reverse_colors_offsets[board.piece_at(i).symbol()]] = 1
except AttributeError:
pass
return inversevector
def flip_board(board):
""" Computes bitboard of the mirror image of a given position. """
flipvector = [0 for _ in xrange(_bitboard_length)]
for i in range(8):
for j in range(8):
try:
flipvector[12*(8*i + 7 - j)
+ _offsets[board.piece_at(8*i + j).symbol()]] = 1
except AttributeError:
pass
return flipvector
def reverse_and_flip(board):
""" Computes bitboard after flipping position and reversing colors.
board: object of type chess.Board
Return value: flipped bitboard
"""
reversevector = [0 for _ in xrange(_bitboard_length)]
for i in range(8):
for j in range(8):
try:
reversevector[12*(8*i + 7 - j)
+ _reverse_colors_offsets[
board.piece_at(8*i + j).symbol()]] = 1
except AttributeError:
pass
return reversevector
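# How the four transforms are used downstream (see ChexIndex.add_game):
# strategically equivalent positions collapse to one canonical key, the
# lexicographic minimum of the four encodings. Illustrative sketch:
#
#     variants = [board_to_bitboard(board), invert_board(board),
#                 flip_board(board), reverse_and_flip(board)]
#     canonical_key = min(map(bitboard_to_key, variants))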
class ChexIndex(object):
""" Manages game states from Annoy Index and SQL database. """
def __init__(self, chex_index, id_label='FICSGamesDBGameNo',
first_indexed_move=10, n_trees=200, seed=1,
scratch=None, learning_rate=1, min_iterations=100,
max_iterations=5000000, difference=.1):
""" Number of dimensions is always 8 x 8 x 12; there are 6 black piece
types, six white piece types, and the board is 8 x 8."""
self.annoy_index = AnnoyIndex(_bitboard_length, metric='angular')
self.id_label = id_label
self.first_indexed_move = first_indexed_move
self.chex_index = chex_index
try:
os.makedirs(self.chex_index)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Create temporary directory
if scratch is not None:
try:
                os.makedirs(scratch)
except OSError as e:
if e.errno != errno.EEXIST:
raise
self.scratch = tempfile.mkdtemp(dir=scratch)
# Schedule temporary directory for deletion
atexit.register(shutil.rmtree, self.scratch, ignore_errors=True)
self.chex_sql = SqliteDict(
os.path.join(self.chex_index, 'sqlite.idx'))
self.game_sql = SqliteDict(
os.path.join(self.scratch, 'temp.idx')
)
self.game_number = 0
self.n_trees = n_trees
# For reproducibly randomly drawing boards
self.seed = seed
self.learning_rate = learning_rate
self.min_iterations = min_iterations
self.max_iterations = max_iterations
self.difference = difference
self.weights = [1. for _ in xrange(_bitboard_length)]
def add_game(self, node):
""" Adds game parsed by chess library to chex SQL database.
node: game object of type chess.pgn.Game
Return value: 0 if game added successfully, else 1
"""
if node is None:
return 1
game_id = node.headers[self.id_label]
move_number = 0
for move_number in xrange(self.first_indexed_move - 1):
try:
node = node.variations[0]
except IndexError:
# Too few moves to index
return 0
while True:
move_number += 1
bitboard = board_to_bitboard(node.board())
inversevector = invert_board(node.board())
flipvector = flip_board(node.board())
reversevector = reverse_and_flip(node.board())
# Store as ASCII; use minimum of strategically equivalent boards
# See https://github.com/samirsen/chex/issues/1 for details
key = min(map(bitboard_to_key,
[bitboard, inversevector, flipvector, reversevector]))
            if key in self.chex_sql:
self.chex_sql[key] = self.chex_sql[key] + [
(game_id, move_number)
]
else:
self.chex_sql[key] = [(game_id, move_number)]
if self.game_number in self.game_sql:
self.game_sql[self.game_number] = self.game_sql[
self.game_number] + [key]
else:
self.game_sql[self.game_number] = [key]
if node.is_end(): break
node = node.variations[0]
self.game_number += 1
return 0
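    # Resulting storage layout after add_game (illustrative):
    #   chex_sql[canonical_key] -> [(game_id, move_number), ...]
    #   game_sql[game_number]   -> [canonical_key, ...], one key per indexed ply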
def _mahalanobis_loss(self,
reference_bitboard, plus_bitboard, minus_bitboard):
""" Computes value of loss function for finding Mahalanobis metric.
reference_bitboard, plus_bitboard, minus_bitboard: explained
in algo
Return value: value of loss function
"""
return max(0.,
1. + sum([minus_bitboard[i]
* reference_bitboard[i] * self.weights[i]
for i in xrange(_bitboard_length)])
- sum([plus_bitboard[i]
* reference_bitboard[i] * self.weights[i]
for i in xrange(_bitboard_length)]))
def _mahalanobis(self):
""" Computes sparse Mahalanobis metric using algorithm from paper.
The reference is SOML: Sparse online metric learning with
application to image retrieval by Gao et al. We implement
their algorithm 1: SOML-TG (sparse online metric learning via
truncated gradient). We set lambda = 0 and use no
sparsity-promoting regularization term.
Return value: diagonal of Mahalanobis metric
"""
# Finalize game SQL database for querying
self.game_sql.commit()
# For reproducible random draws from database
random.seed(self.seed)
last_weights = [0 for _ in xrange(_bitboard_length)]
iteration, critical_iteration = 0, self.min_iterations
while True:
# Draw game
game_index = random.randint(0, self.game_number - 1)
            # random.sample draws without replacement and returns the picks
            # in random order, so the three boards below are already shuffled
try:
[reference_bitboard,
plus_bitboard, minus_bitboard] = random.sample(
list(
enumerate(
map(key_to_bitboard, self.game_sql[game_index])
)
), 3
)
except ValueError:
# Not enough moves in game to index
continue
reference_bitboard, plus_bitboard, minus_bitboard = (
list(reference_bitboard), list(plus_bitboard),
list(minus_bitboard)
)
for bitboard in reference_bitboard, plus_bitboard, minus_bitboard:
norm_constant = 1. / sqrt(sum(bitboard[1]))
bitboard[1] = [component * norm_constant
for component in bitboard[1]]
if abs(minus_bitboard[0] - reference_bitboard[0]) < abs(
plus_bitboard[0] - reference_bitboard[0]):
minus_bitboard, plus_bitboard = plus_bitboard, minus_bitboard
if self._mahalanobis_loss(reference_bitboard[1],
plus_bitboard[1], minus_bitboard[1]
) > 0:
                # Descend the hinge loss: boost weights on coordinates shared
                # with the near (plus) board, reduce those shared with the
                # far (minus) board
                v = [self.weights[i] + self.learning_rate
                     * reference_bitboard[1][i]
                     * (plus_bitboard[1][i] - minus_bitboard[1][i])
                     for i in xrange(_bitboard_length)]
                # With lambda = 0 (see docstring), SOML-TG's truncation step
                # reduces to the identity
                self.weights = v
iteration += 1
if iteration >= critical_iteration:
                logging.debug('checkpoint at iteration %d; weights: %s',
                              critical_iteration, self.weights)
if sqrt(sum([(last_weights[i] - self.weights[i])**2
for i in xrange(_bitboard_length)])) <= (
self.difference):
# Must sqrt so angular distance in annoy works
self.weights = [sqrt(weight) for weight in self.weights]
break
last_weights = copy.copy(self.weights)
critical_iteration *= 2
if iteration >= self.max_iterations:
# Must sqrt so angular distance in annoy works
                logging.debug('stopping at max_iterations; weights: %s',
                              self.weights)
self.weights = [sqrt(weight) for weight in self.weights]
break
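    # Toy check of the hinge loss above (hypothetical numbers): with unit
    # weights, if the reference and plus boards coincide (normalized inner
    # product 1) while the minus board shares no pieces with the reference
    # (inner product 0), the loss is max(0, 1 + 0 - 1) = 0 and no update
    # fires; once the plus board drifts away, the loss turns positive and
    # the gradient step works to restore the unit margin.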
def _annoy_index(self):
""" Adds all boards from chex SQL database to Annoy index
No return value.
"""
for i, key in enumerate(self.chex_sql):
bitboard = key_to_bitboard(key)
self.annoy_index.add_item(i, [self.weights[j] * bitboard[j]
for j in xrange(_bitboard_length)])
def save(self):
# Compute Mahalanobis matrix
self._mahalanobis()
# Create annoy index
self._annoy_index()
self.annoy_index.build(self.n_trees)
# Save all index files
        self.annoy_index.save(
os.path.join(self.chex_index, 'annoy.idx')
)
self.chex_sql.commit()
self.chex_sql.close()
self.game_sql.close()
# Clean up
shutil.rmtree(self.scratch, ignore_errors=True)
class ChexSearch(object):
""" Searches Chex index for game states and associated games. """
#TODO: Combine results of board transforms with binary search algo.
def __init__(self, chex_index, results=10, search_k=40):
self.chex_index = chex_index
self.results = results
self.search_k = search_k
self.annoy_index = AnnoyIndex(_bitboard_length, metric='angular')
self.annoy_index.load(os.path.join(self.chex_index, 'annoy.idx'))
self.chex_sql = SqliteDict(
os.path.join(self.chex_index, 'sqlite.idx'))
def search(self, board):
""" Searches for board.
board: game object of type chess.Board
Return value: [
(board, similarity score, [(game_id, move number), ...]), ...]
"""
symmetrical_boards = [board_to_bitboard(board),
invert_board(board),
flip_board(board),
reverse_and_flip(board)]
results = []
        for query_bitboard in symmetrical_boards:
            for annoy_id, similarity in zip(
                    *self.annoy_index.get_nns_by_vector(
                        query_bitboard, self.results,
                        search_k=self.search_k,
                        include_distances=True
                    )):
                # Recompute ASCII key from the stored (weighted) vector;
                # this assumes the truncated components still round-trip
                # to the original 0/1 bitboard
                match_bitboard = self.annoy_index.get_item_vector(annoy_id)
                to_unhexlify = '%x' % int(''.join(
                    map(str, map(int, match_bitboard))), 2)
                try:
                    key = binascii.unhexlify(to_unhexlify)
                except TypeError:
                    key = binascii.unhexlify('0' + to_unhexlify)
                results.append((bitboard_to_board(match_bitboard), similarity,
                                self.chex_sql[key]))
return results
def close(self):
del self.annoy_index
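# Minimal usage sketch (assuming an index was previously built with
# `chex index -p games.pgn -x index_dir`):
#
#     searcher = ChexSearch('index_dir', results=5)
#     for board, similarity, games in searcher.search(chess.Board()):
#         print board.board_fen(), similarity
#     searcher.close()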
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=_help_intro,
formatter_class=help_formatter)
subparsers = parser.add_subparsers(help=(
'subcommands; add "-h" or "--help" '
'after a subcommand for its parameters'),
dest='subparser_name'
)
index_parser = subparsers.add_parser(
'index',
help='creates index of chess game states'
)
search_parser = subparsers.add_parser(
'search',
help=('searches for chess game states similar to '
'those input by user')
)
index_parser.add_argument('-f', '--first-indexed-move',
metavar='<int>', type=int, required=False,
default=10,
help=('indexes only those game states at least this many moves '
'into a given game')
)
index_parser.add_argument('-p', '--pgns', metavar='<files>', nargs='+',
required=True, type=str,
help='space-separated list of PGNs to index'
)
index_parser.add_argument('-i', '--id-label', metavar='<str>',
required=False, type=str,
default='FICSGamesDBGameNo',
help='game ID label from metadata in PGN files'
)
index_parser.add_argument('-x', '--chex-index', metavar='<dir>',
required=True, type=str,
help='directory in which to store chex index files'
)
# Test various values!
index_parser.add_argument('--n-trees', metavar='<int>', type=int,
required=False,
default=200,
help='number of annoy trees'
)
index_parser.add_argument('--scratch', metavar='<dir>', type=str,
required=False,
default=None,
help=('where to store temporary files; default is securely '
'created directory in $TMPDIR or similar'))
index_parser.add_argument('--learning-rate', metavar='<dec>', type=float,
required=False,
default=1,
help='learning rate for Mahalanobis metric')
index_parser.add_argument('--min-iterations', metavar='<int>', type=int,
required=False,
default=100,
help='minimum number of iterations for learning Mahalanobis metric'
)
index_parser.add_argument('--max-iterations', metavar='<int>', type=int,
required=False,
        default=5000000,
help='maximum number of iterations for learning Mahalanobis metric'
)
index_parser.add_argument('--difference', metavar='<dec>', type=float,
required=False,
default=.1,
help=('maximum Euclidean distance between Mahalanobis matrices '
'for deciding convergence')
)
    search_parser.add_argument('-f', '--board-fen', metavar='<str>',
required=True, type=str,
help='first field of FEN describing board to search for')
search_parser.add_argument('-x', '--chex-index', metavar='<dir>',
required=True, type=str,
help='chex index directory'
)
# Test various values!
search_parser.add_argument('--search-k', metavar='<int>',
required=False, type=int,
default=-1,
help='annoy search-k; default is results * n_trees'
)
search_parser.add_argument('--results', metavar='<int>',
required=False, type=int,
default=10,
help='maximum number of returned game states'
)
parser.add_argument('--verbose', action='store_const', const=True,
default=False,
help='be talkative'
)
args = parser.parse_args()
    # Configure logging; basicConfig already installs a stream handler on the
    # root logger, so adding a second one would print every message twice
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%m-%d-%Y %H:%M:%S')
if args.subparser_name == 'index':
index = ChexIndex(args.chex_index, id_label=args.id_label,
first_indexed_move=args.first_indexed_move,
n_trees=args.n_trees, scratch=args.scratch,
learning_rate=args.learning_rate,
min_iterations=args.min_iterations,
max_iterations=args.max_iterations,
difference=args.difference)
for pgn in args.pgns:
game_count = 0
with open(pgn) as pgn_stream:
while True:
if index.add_game(chess.pgn.read_game(pgn_stream)):
break
game_count += 1
print 'Read {} games...\r'.format(game_count),
sys.stdout.flush()
# TODO: clean up display of this
print 'Read {} games.'.format(game_count)
index.save()
else:
assert args.subparser_name == 'search'
searcher = ChexSearch(args.chex_index,
results=args.results, search_k=args.search_k)
# Pretty print results
print '\t'.join(
['rank', 'board FEN', 'similarity score', 'games',
'move numbers']
)
for (rank, (board, similarity, games)) in enumerate(searcher.search(
chess.Board(args.board_fen + ' w KQkq - 0 1')
)):
games = zip(*games)
print '\t'.join([
str(rank + 1), board.board_fen(), str(similarity),
','.join(games[0]), ','.join(map(str, games[1]))
])
        # Closing the searcher explicitly avoids an occasional exception at
        # interpreter shutdown
searcher.close()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Conference'
db.create_table(u'conference', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('session', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('start_date', self.gf('django.db.models.fields.DateField')()),
('end_date', self.gf('django.db.models.fields.DateField')()),
('reg_open', self.gf('django.db.models.fields.DateField')()),
('early_reg_close', self.gf('django.db.models.fields.DateField')()),
('reg_close', self.gf('django.db.models.fields.DateField')()),
('min_attendance', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('max_attendance', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('open_reg', self.gf('django.db.models.fields.BooleanField')(default=True)),
('waitlist_reg', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'core', ['Conference'])
# Adding model 'Country'
db.create_table(u'country', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('special', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'core', ['Country'])
# Adding model 'Committee'
db.create_table(u'committee', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=8)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('delegation_size', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2)),
('special', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'core', ['Committee'])
# Adding model 'School'
db.create_table(u'school', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('registered', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('address', self.gf('django.db.models.fields.CharField')(max_length=128)),
('city', self.gf('django.db.models.fields.CharField')(max_length=128)),
('state', self.gf('django.db.models.fields.CharField')(max_length=16)),
('zip_code', self.gf('django.db.models.fields.CharField')(max_length=16)),
('country', self.gf('django.db.models.fields.CharField')(max_length=64)),
('primary_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('primary_gender', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=4)),
('primary_email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('primary_phone', self.gf('django.db.models.fields.CharField')(max_length=32)),
('primary_type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2)),
('secondary_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('secondary_gender', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=4, blank=True)),
('secondary_email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('secondary_phone', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('secondary_type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2, blank=True)),
('program_type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('times_attended', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('international', self.gf('django.db.models.fields.BooleanField')(default=False)),
('waitlist', self.gf('django.db.models.fields.BooleanField')(default=False)),
('beginner_delegates', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('intermediate_delegates', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('advanced_delegates', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('spanish_speaking_delegates', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('prefers_bilingual', self.gf('django.db.models.fields.BooleanField')(default=False)),
('prefers_crisis', self.gf('django.db.models.fields.BooleanField')(default=False)),
('prefers_small_specialized', self.gf('django.db.models.fields.BooleanField')(default=False)),
('prefers_mid_large_specialized', self.gf('django.db.models.fields.BooleanField')(default=False)),
('registration_comments', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('registration_fee', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
('registration_fee_paid', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
('registration_fee_balance', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
('delegation_fee', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
('delegation_fee_paid', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
('delegation_fee_balance', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=6, decimal_places=2)),
))
db.send_create_signal(u'core', ['School'])
# Adding model 'Assignment'
db.create_table(u'assignment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('committee', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Committee'])),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Country'])),
('school', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['core.School'], null=True, blank=True)),
))
db.send_create_signal(u'core', ['Assignment'])
# Adding model 'CountryPreference'
db.create_table(u'country_preference', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.School'])),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Country'])),
('rank', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
))
db.send_create_signal(u'core', ['CountryPreference'])
# Adding model 'DelegateSlot'
db.create_table(u'delegate_slot', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('assignment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Assignment'])),
('attended_session1', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('attended_session2', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('attended_session3', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('attended_session4', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
))
db.send_create_signal(u'core', ['DelegateSlot'])
# Adding model 'Delegate'
db.create_table(u'delegate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('delegate_slot', self.gf('django.db.models.fields.related.OneToOneField')(default=None, related_name='delegate', unique=True, null=True, to=orm['core.DelegateSlot'])),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(default='', null=True)),
))
db.send_create_signal(u'core', ['Delegate'])
def backwards(self, orm):
# Deleting model 'Conference'
db.delete_table(u'conference')
# Deleting model 'Country'
db.delete_table(u'country')
# Deleting model 'Committee'
db.delete_table(u'committee')
# Deleting model 'School'
db.delete_table(u'school')
# Deleting model 'Assignment'
db.delete_table(u'assignment')
# Deleting model 'CountryPreference'
db.delete_table(u'country_preference')
# Deleting model 'DelegateSlot'
db.delete_table(u'delegate_slot')
# Deleting model 'Delegate'
db.delete_table(u'delegate')
models = {
u'core.assignment': {
'Meta': {'object_name': 'Assignment', 'db_table': "u'assignment'"},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Committee']"}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['core.School']", 'null': 'True', 'blank': 'True'})
},
u'core.committee': {
'Meta': {'object_name': 'Committee', 'db_table': "u'committee'"},
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Country']", 'through': u"orm['core.Assignment']", 'symmetrical': 'False'}),
'delegation_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.conference': {
'Meta': {'object_name': 'Conference', 'db_table': "u'conference'"},
'early_reg_close': ('django.db.models.fields.DateField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_attendance': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'min_attendance': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'open_reg': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reg_close': ('django.db.models.fields.DateField', [], {}),
'reg_open': ('django.db.models.fields.DateField', [], {}),
'session': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'waitlist_reg': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.country': {
'Meta': {'object_name': 'Country', 'db_table': "u'country'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.countrypreference': {
'Meta': {'ordering': "['rank']", 'object_name': 'CountryPreference', 'db_table': "u'country_preference'"},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.School']"})
},
u'core.delegate': {
'Meta': {'object_name': 'Delegate', 'db_table': "u'delegate'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'delegate_slot': ('django.db.models.fields.related.OneToOneField', [], {'default': 'None', 'related_name': "'delegate'", 'unique': 'True', 'null': 'True', 'to': u"orm['core.DelegateSlot']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'})
},
u'core.delegateslot': {
'Meta': {'ordering': "['assignment__country']", 'object_name': 'DelegateSlot', 'db_table': "u'delegate_slot'"},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Assignment']"}),
'attended_session1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'attended_session2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'attended_session3': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'attended_session4': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'core.school': {
'Meta': {'object_name': 'School', 'db_table': "u'school'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'advanced_delegates': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'beginner_delegates': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'countrypreferences': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Country']", 'through': u"orm['core.CountryPreference']", 'symmetrical': 'False'}),
'delegation_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'delegation_fee_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'delegation_fee_paid': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intermediate_delegates': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'prefers_bilingual': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prefers_crisis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prefers_mid_large_specialized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prefers_small_specialized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primary_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'primary_gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'}),
'primary_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'primary_phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'primary_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'program_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'registered': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'registration_comments': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'registration_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'registration_fee_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'registration_fee_paid': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'secondary_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'secondary_gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4', 'blank': 'True'}),
'secondary_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'secondary_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'secondary_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2', 'blank': 'True'}),
'spanish_speaking_delegates': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'times_attended': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'waitlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16'})
}
}
complete_apps = ['core']
|
|
import logging
import time
import requests
import six.moves.urllib.parse as urlparse
from .. import client
from ..common import constants
from ..common import exceptions
from ..common import serializer
from ..common import utils
from ..i18n import _
_logger = logging.getLogger(__name__)
def exception_handler_v20(status_code, error_content):
error_dict = None
if isinstance(error_content, dict):
error_dict = {
'type': 'Network',
'message': error_content.get(u'error')
}
    if error_dict:
        try:
            error_type = error_dict['type']
            error_message = error_dict['message']
        except Exception:
            error_type = 'Network'
            error_message = error_dict
        # Map the error type to a specific client exception, falling back
        # to the HTTP status-code map and finally to ESIClientException
        client_exc = getattr(exceptions, '%sClient' % error_type, None)
        if not client_exc:
            client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
        if client_exc:
            raise client_exc(message=error_message,
                             status_code=status_code)
        else:
            raise exceptions.ESIClientException(
                status_code=status_code, message=error_message)
else:
message = None
if isinstance(error_content, dict):
message = error_content.get('message')
if message:
raise exceptions.ESIClientException(status_code=status_code,
message=message)
msg = "%s-%s" % (status_code, error_content)
raise exceptions.ESIClientException(status_code=status_code,
message=msg)
class APIParamsCall(object):
def __init__(self, function):
self.function = function
def __get__(self, instance, owner):
def with_params(*args, **kwargs):
_format = instance.format
if 'format' in kwargs:
instance.format = kwargs['format']
ret = self.function(instance, *args, **kwargs)
instance.format = _format
return ret
return with_params
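# Illustrative behavior of APIParamsCall (hypothetical call): it is a
# descriptor that honors a per-call `format` override and then restores the
# client's configured format.
#
#     client.list_ports(format='json')  # instance.format swapped for the call
#     # client.format is unchanged afterwards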
class ClientBase(object):
EXTED_PLURALS = {}
def __init__(self, **kwargs):
super(ClientBase, self).__init__()
self.retries = kwargs.pop('retries', 0)
self.raise_errors = kwargs.pop('raise_errors', True)
self.httpclient = client.construct_http_client(**kwargs)
self.version = '2.0'
self.format = 'json'
self.action_prefix = "/v%s" % self.version
self.retry_interval = 1
def _handle_fault_response(self, status_code, response_body):
_logger.debug("Error message: %s", response_body)
try:
des_error_body = self.deserialize(response_body, status_code)
except Exception:
des_error_body = {'message': response_body}
exception_handler_v20(status_code, des_error_body)
def do_request(self, method, action, body=None, headers=None, params=None):
action = self.action_prefix + action
if type(params) is dict and params:
params = utils.safe_encode_dict(params)
action += '?' + urlparse.urlencode(params, doseq=1)
if body:
body = self.serialize(body)
resp, replybody = self.httpclient.do_request(
action, method, body=body,
content_type=self.content_type())
status_code = resp.status_code
if status_code in (requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
requests.codes.no_content):
return self.deserialize(replybody, status_code)
else:
if not replybody:
replybody = resp.reason
self._handle_fault_response(status_code, replybody)
def get_auth_info(self):
return self.httpclient.get_auth_info()
def serialize(self, data):
if data is None:
return None
elif type(data) is dict:
return serializer.Serializer(
self.get_attr_metadata()).serialize(data, self.content_type())
else:
raise Exception(_("Unable to serialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
if status_code == 204:
return data
return serializer.Serializer(self.get_attr_metadata()).deserialize(
data, self.content_type())['body']
def get_attr_metadata(self):
if self.format == 'json':
return {}
old_request_format = self.format
self.format = 'json'
exts = self.list_extensions()['extensions']
self.format = old_request_format
ns = dict([(ext['alias'], ext['namespace']) for ext in exts])
self.EXTED_PLURALS.update(constants.PLURALS)
return {'plurals': self.EXTED_PLURALS,
'xmlns': constants.XML_NS_V20,
constants.EXT_NS: ns}
def content_type(self, _format=None):
_format = _format or self.format
return "application/%s" % _format
def retry_request(self, method, action, body=None,
headers=None, params=None):
max_attempts = self.retries + 1
for i in range(max_attempts):
try:
return self.do_request(method, action, body=body,
headers=headers, params=params)
except exceptions.ConnectionFailed:
if i < self.retries:
_logger.debug('Retrying connection to ESI service')
time.sleep(self.retry_interval)
elif self.raise_errors:
raise
if self.retries:
msg = (_("Failed to connect to ESI server after %d attempts")
% max_attempts)
else:
msg = _("Failed to connect ESI server")
raise exceptions.ConnectionFailed(reason=msg)
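    # Retry behavior sketch (hypothetical values): with retries=2 the client
    # makes up to three attempts, sleeping retry_interval seconds after each
    # ConnectionFailed before giving up.
    #
    #     client = Client(retries=2)  # other kwargs go to the HTTP client
    #     client.get('/networks')     # retried on ConnectionFailed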
def delete(self, action, body=None, headers=None, params=None):
return self.retry_request("DELETE", action, body=body,
headers=headers, params=params)
def get(self, action, body=None, headers=None, params=None):
return self.retry_request("GET", action, body=body,
headers=headers, params=params)
def post(self, action, body=None, headers=None, params=None):
return self.do_request("POST", action, body=body,
headers=headers, params=params)
def put(self, action, body=None, headers=None, params=None):
return self.retry_request("PUT", action, body=body,
headers=headers, params=params)
def list(self, collection, path, retrieve_all=True, **params):
if retrieve_all:
res = []
for r in self._pagination(collection, path, **params):
res.extend(r[collection])
return {collection: res}
else:
return self._pagination(collection, path, **params)
def _pagination(self, collection, path, **params):
if params.get('page_reverse', False):
linkrel = 'previous'
else:
linkrel = 'next'
        has_next = True
        while has_next:
            res = self.get(path, params=params)
            yield res
            has_next = False
            try:
                for link in res['%s_links' % collection]:
                    if link['rel'] == linkrel:
                        query_str = urlparse.urlparse(link['href']).query
                        params = urlparse.parse_qs(query_str)
                        has_next = True
break
except KeyError:
break
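    # Pagination sketch: callers can stream pages instead of accumulating
    # them (assuming the service returns Neutron-style `<collection>_links`):
    #
    #     for page in client.list('ports', client.ports_path,
    #                             retrieve_all=False):
    #         process(page['ports'])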
class Client(ClientBase):
networks_path = "/networks"
network_path = "/networks/%s"
ports_path = "/ports"
port_path = "/ports/%s"
subnets_path = "/subnets"
subnet_path = "/subnets/%s"
subnetpools_path = "/subnetpools"
subnetpool_path = "/subnetpools/%s"
quotas_path = "/quotas"
quota_path = "/quotas/%s"
extensions_path = "/extensions"
extension_path = "/extensions/%s"
physical_ports_path = "/physical_ports"
physical_port_path = "/physical_ports/%s"
gw_interfaces_path = '/gw_interfaces'
gw_interface_path = '/gw_interfaces/%s'
vpn_interfaces_path = '/vpn_interfaces'
vpn_interface_path = '/vpn_interfaces/%s'
fic_interfaces_path = '/fic_interfaces'
fic_interface_path = '/fic_interfaces/%s'
static_routes_path = '/static_routes'
static_route_path = '/static_routes/%s'
internet_services_path = '/internet_services'
internet_service_path = '/internet_services/%s'
vpn_services_path = '/vpn_services'
vpn_service_path = '/vpn_services/%s'
fic_services_path = '/fic_services'
fic_service_path = '/fic_services/%s'
internet_gateways_path = '/internet_gateways'
internet_gateway_path = '/internet_gateways/%s'
vpn_gateways_path = '/vpn_gateways'
vpn_gateway_path = '/vpn_gateways/%s'
fic_gateways_path = '/fic_gateways'
fic_gateway_path = '/fic_gateways/%s'
interdc_gateways_path = "/interdc_gateways"
interdc_gateway_path = "/interdc_gateways/%s"
interdc_services_path = "/interdc_services"
interdc_service_path = "/interdc_services/%s"
interdc_interfaces_path = "/interdc_interfaces"
interdc_interface_path = "/interdc_interfaces/%s"
public_ips_path = '/public_ips'
public_ip_path = '/public_ips/%s'
public_ip_pools_path = '/public_ip_pools'
public_ip_pool_path = '/public_ip_pools/%s'
qos_options_path = '/qos_options'
qos_option_path = '/qos_options/%s'
DHCP_NETS = '/dhcp-networks'
DHCP_AGENTS = '/dhcp-agents'
firewalls_path = '/firewalls'
firewall_path = '/firewalls/%s'
firewall_interfaces_path = '/firewall_interfaces'
firewall_interface_path = '/firewall_interfaces/%s'
firewall_plans_path = '/firewall_plans'
firewall_plan_path = '/firewall_plans/%s'
loadbalancers_path = '/load_balancers'
loadbalancer_path = '/load_balancers/%s'
loadbalancer_plans_path = '/load_balancer_plans'
loadbalancer_plan_path = '/load_balancer_plans/%s'
loadbalancer_interfaces_path = '/load_balancer_interfaces'
loadbalancer_interface_path = '/load_balancer_interfaces/%s'
loadbalancer_syslog_servers_path = '/load_balancer_syslog_servers'
loadbalancer_syslog_server_path = '/load_balancer_syslog_servers/%s'
_cfgw_singular = 'common_function_gateway'
_cfgw_plural = _cfgw_singular + 's'
common_function_gateways_path = '/%s' % _cfgw_plural
common_function_gateway_path = common_function_gateways_path + '/%s'
_common_function_pool_singular = 'common_function_pool'
_common_function_pool_plural = _common_function_pool_singular + 's'
common_function_pools_path = '/%s' % _common_function_pool_plural
common_function_pool_path = common_function_pools_path + '/%s'
_common_function_singular = 'common_function'
_common_function_plural = _common_function_singular + 's'
common_functions_path = '/%s' % _common_function_plural
common_function_path = common_functions_path + '/%s'
reserve_addresses_path = "/reserve_addresses"
reserve_address_path = "/reserve_addresses/%s"
_colo_logical_link_singular = 'colocation_logical_link'
_colo_logical_link_plural = _colo_logical_link_singular + 's'
colocation_logical_links_path = '/%s' % _colo_logical_link_plural
colocation_logical_link_path = colocation_logical_links_path + '/%s'
_colo_physical_link_singular = 'colocation_physical_link'
_colo_physical_link_plural = _colo_physical_link_singular + 's'
colocation_physical_links_path = '/%s' % _colo_physical_link_plural
colocation_physical_link_path = colocation_physical_links_path + '/%s'
_colo_space_singular = 'colocation_space'
_colo_space_plural = _colo_space_singular + 's'
colocation_spaces_path = '/%s' % _colo_space_plural
colocation_space_path = colocation_spaces_path + '/%s'
EXTED_PLURALS = {'routers': 'router',
'floatingips': 'floatingip',
'service_types': 'service_type',
'service_definitions': 'service_definition',
'security_groups': 'security_group',
'security_group_rules': 'security_group_rule',
'ipsecpolicies': 'ipsecpolicy',
'ikepolicies': 'ikepolicy',
'ipsec_site_connections': 'ipsec_site_connection',
'vpnservices': 'vpnservice',
'vips': 'vip',
'pools': 'pool',
'members': 'member',
'health_monitors': 'health_monitor',
'quotas': 'quota',
'service_providers': 'service_provider',
'firewall_rules': 'firewall_rule',
'firewall_policies': 'firewall_policy',
'firewalls': 'firewall',
'metering_labels': 'metering_label',
'metering_label_rules': 'metering_label_rule',
'net_partitions': 'net_partition',
'packet_filters': 'packet_filter',
'loadbalancers': 'loadbalancer',
'listeners': 'listener',
'lbaas_pools': 'lbaas_pool',
'lbaas_healthmonitors': 'lbaas_healthmonitor',
'lbaas_members': 'lbaas_member',
'healthmonitors': 'healthmonitor',
}
@APIParamsCall
def get_quotas_tenant(self, **_params):
return self.get(self.quota_path % 'tenant', params=_params)
@APIParamsCall
def list_quotas(self, **_params):
return self.get(self.quotas_path, params=_params)
@APIParamsCall
def show_quota(self, tenant_id, **_params):
return self.get(self.quota_path % tenant_id, params=_params)
@APIParamsCall
def update_quota(self, tenant_id, body=None):
return self.put(self.quota_path % tenant_id, body=body)
@APIParamsCall
def delete_quota(self, tenant_id):
return self.delete(self.quota_path % tenant_id)
@APIParamsCall
def list_extensions(self, **_params):
return self.get(self.extensions_path, params=_params)
@APIParamsCall
def show_extension(self, ext_alias, **_params):
return self.get(self.extension_path % ext_alias, params=_params)
@APIParamsCall
def list_ports(self, retrieve_all=True, **_params):
return self.list('ports', self.ports_path, retrieve_all,
**_params)
@APIParamsCall
def show_port(self, port, **_params):
return self.get(self.port_path % port, params=_params)
@APIParamsCall
def create_port(self, body=None):
return self.post(self.ports_path, body=body)
@APIParamsCall
def update_port(self, port, body=None):
return self.put(self.port_path % port, body=body)
@APIParamsCall
def delete_port(self, port):
return self.delete(self.port_path % port)
@APIParamsCall
def list_physical_ports(self, retrieve_all=True, **_params):
return self.list('physical_ports',
self.physical_ports_path,
retrieve_all,
**_params)
@APIParamsCall
def show_physical_port(self, port, **_params):
return self.get(self.physical_port_path % port, params=_params)
@APIParamsCall
def list_networks(self, retrieve_all=True, **_params):
return self.list('networks', self.networks_path, retrieve_all,
**_params)
@APIParamsCall
def show_network(self, network, **_params):
return self.get(self.network_path % network, params=_params)
@APIParamsCall
def create_network(self, body=None):
return self.post(self.networks_path, body=body)
@APIParamsCall
def update_network(self, network, body=None):
return self.put(self.network_path % network, body=body)
@APIParamsCall
def delete_network(self, network):
return self.delete(self.network_path % network)
@APIParamsCall
def list_subnets(self, retrieve_all=True, **_params):
return self.list(
'subnets', self.subnets_path, retrieve_all, **_params)
@APIParamsCall
def show_subnet(self, subnet, **_params):
return self.get(self.subnet_path % subnet, params=_params)
@APIParamsCall
def create_subnet(self, body=None):
return self.post(self.subnets_path, body=body)
@APIParamsCall
def update_subnet(self, subnet, body=None):
return self.put(self.subnet_path % subnet, body=body)
@APIParamsCall
def delete_subnet(self, subnet):
return self.delete(self.subnet_path % subnet)
@APIParamsCall
def list_internet_gateways(self, **_params):
return self.list('internet_gateways', self.internet_gateways_path, **_params)
@APIParamsCall
def show_internet_gateway(self, internet_gateway_id, **_params):
return self.get(self.internet_gateway_path % internet_gateway_id, params=_params)
@APIParamsCall
def create_internet_gateway(self, body=None):
return self.post(self.internet_gateways_path, body=body)
@APIParamsCall
def update_internet_gateway(self, internet_gateway_id, body=None):
return self.put(self.internet_gateway_path % internet_gateway_id, body=body)
@APIParamsCall
def delete_internet_gateway(self, internet_gateway_id):
return self.delete(self.internet_gateway_path % internet_gateway_id)
@APIParamsCall
def list_vpn_gateways(self, **_params):
return self.list('vpn_gateways', self.vpn_gateways_path, **_params)
@APIParamsCall
def show_vpn_gateway(self, vpn_gateway_id, **_params):
return self.get(self.vpn_gateway_path % vpn_gateway_id, params=_params)
@APIParamsCall
def create_vpn_gateway(self, body=None):
return self.post(self.vpn_gateways_path, body=body)
@APIParamsCall
def update_vpn_gateway(self, vpn_gateway_id, body=None):
return self.put(self.vpn_gateway_path % vpn_gateway_id, body=body)
@APIParamsCall
def delete_vpn_gateway(self, vpn_gateway_id):
return self.delete(self.vpn_gateway_path % vpn_gateway_id)
@APIParamsCall
def list_fic_gateways(self, **_params):
return self.list('fic_gateways', self.fic_gateways_path, **_params)
@APIParamsCall
def show_fic_gateway(self, fic_gateway_id, **_params):
return self.get(self.fic_gateway_path % fic_gateway_id, params=_params)
@APIParamsCall
def list_interdc_services(self, **_params):
return self.list('interdc_services', self.interdc_services_path, **_params)
@APIParamsCall
def show_interdc_service(self, interdc_service_id, **_params):
return self.get(self.interdc_service_path % interdc_service_id,
                        params=_params)
@APIParamsCall
def list_interdc_gateways(self, **_params):
return self.list('interdc_gateways', self.interdc_gateways_path, **_params)
@APIParamsCall
def show_interdc_gateway(self, interdc_gateway_id, **_params):
return self.get(self.interdc_gateway_path % interdc_gateway_id,
                        params=_params)
@APIParamsCall
def create_interdc_gateway(self, body=None):
return self.post(self.interdc_gateways_path, body=body)
@APIParamsCall
def update_interdc_gateway(self, interdc_gateway_id, body=None):
return self.put(self.interdc_gateway_path % interdc_gateway_id, body=body)
@APIParamsCall
def delete_interdc_gateway(self, interdc_gateway_id):
return self.delete(self.interdc_gateway_path % interdc_gateway_id)
@APIParamsCall
def list_interdc_interfaces(self, **_params):
return self.list('interdc_interfaces',
self.interdc_interfaces_path,
**_params)
@APIParamsCall
def show_interdc_interface(self, interdc_interface_id, **_params):
return self.get(self.interdc_interface_path % interdc_interface_id,
                        params=_params)
@APIParamsCall
def create_interdc_interface(self, body=None):
return self.post(self.interdc_interfaces_path, body=body)
@APIParamsCall
def delete_interdc_interface(self, interdc_interface_id):
return self.delete(self.interdc_interface_path % interdc_interface_id)
@APIParamsCall
def update_interdc_interface(self, interdc_interface_id, body=None):
return self.put(self.interdc_interface_path % interdc_interface_id, body=body)
@APIParamsCall
def list_gw_interfaces(self, **_params):
return self.list('gw_interfaces', self.gw_interfaces_path, **_params)
@APIParamsCall
def show_gw_interface(self, gw_interface_id, **_params):
return self.get(self.gw_interface_path % gw_interface_id, params=_params)
@APIParamsCall
def create_gw_interface(self, body=None):
return self.post(self.gw_interfaces_path, body=body)
@APIParamsCall
def update_gw_interface(self, gw_interface_id, body=None):
return self.put(self.gw_interface_path % gw_interface_id, body=body)
@APIParamsCall
def delete_gw_interface(self, gw_interface_id):
return self.delete(self.gw_interface_path % gw_interface_id)
@APIParamsCall
def list_vpn_interfaces(self, **_params):
return self.list('vpn_interfaces', self.vpn_interfaces_path, **_params)
@APIParamsCall
def show_vpn_interface(self, vpn_interface_id, **_params):
return self.get(self.vpn_interface_path % vpn_interface_id, params=_params)
@APIParamsCall
def create_vpn_interface(self, body=None):
return self.post(self.vpn_interfaces_path, body=body)
@APIParamsCall
def update_vpn_interface(self, vpn_interface_id, body=None):
return self.put(self.vpn_interface_path % vpn_interface_id, body=body)
@APIParamsCall
def delete_vpn_interface(self, vpn_interface_id):
return self.delete(self.vpn_interface_path % vpn_interface_id)
@APIParamsCall
def list_fic_interfaces(self, **_params):
return self.list('fic_interfaces', self.fic_interfaces_path, **_params)
@APIParamsCall
def show_fic_interface(self, fic_interface_id, **_params):
return self.get(self.fic_interface_path % fic_interface_id,
params=_params)
@APIParamsCall
def list_static_routes(self, **_params):
return self.list('static_routes',
self.static_routes_path, **_params)
@APIParamsCall
def show_static_route(self, static_route_id, **_params):
return self.get(self.static_route_path % static_route_id, params=_params)
@APIParamsCall
def create_static_route(self, body=None):
return self.post(self.static_routes_path, body=body)
@APIParamsCall
def update_static_route(self, static_route_id, body=None):
return self.put(self.static_route_path % static_route_id, body=body)
@APIParamsCall
def delete_static_route(self, static_route_id):
return self.delete(self.static_route_path % static_route_id)
@APIParamsCall
def list_public_ips(self, **_params):
return self.list('public_ips',
self.public_ips_path, **_params)
@APIParamsCall
def show_public_ip(self, public_ip_id, **_params):
return self.get(self.public_ip_path % public_ip_id, params=_params)
@APIParamsCall
def create_public_ip(self, body=None):
return self.post(self.public_ips_path, body=body)
@APIParamsCall
def update_public_ip(self, public_ip_id, body=None):
return self.put(self.public_ip_path % public_ip_id, body=body)
@APIParamsCall
def delete_public_ip(self, public_ip_id):
return self.delete(self.public_ip_path % public_ip_id)
@APIParamsCall
def list_internet_services(self, **_params):
return self.list('internet_services', self.internet_services_path, **_params)
@APIParamsCall
def show_internet_service(self, internet_service_id, **_params):
return self.get(self.internet_service_path % internet_service_id, params=_params)
@APIParamsCall
def list_vpn_services(self, **_params):
return self.list('vpn_services', self.vpn_services_path, **_params)
@APIParamsCall
def show_vpn_service(self, vpn_service_id, **_params):
return self.get(self.vpn_service_path % vpn_service_id, params=_params)
@APIParamsCall
def list_fic_services(self, **_params):
return self.list('fic_services', self.fic_services_path, **_params)
@APIParamsCall
def show_fic_service(self, fic_service_id, **_params):
return self.get(self.fic_service_path % fic_service_id, params=_params)
@APIParamsCall
def list_public_ip_pools(self, **_params):
return self.list('public_ip_pools',
self.public_ip_pools_path, **_params)
@APIParamsCall
def show_public_ip_pool(self, public_ip_pool_id, **_params):
return self.get(self.public_ip_pool_path % public_ip_pool_id, params=_params)
@APIParamsCall
def list_qos_options(self, **_params):
return self.list('qos_options',
self.qos_options_path, **_params)
@APIParamsCall
def show_qos_option(self, qos_option_id, **_params):
return self.get(self.qos_option_path % qos_option_id, params=_params)
@APIParamsCall
def list_firewalls(self, **_params):
return self.list('firewalls', self.firewalls_path, **_params)
@APIParamsCall
def create_firewall(self, body=None):
return self.post(self.firewalls_path, body=body)
@APIParamsCall
def show_firewall(self, firewall_id, **_params):
return self.get(self.firewall_path % firewall_id, params=_params)
@APIParamsCall
def update_firewall(self, firewall_id, body=None):
return self.put(self.firewall_path % firewall_id, body=body)
@APIParamsCall
def delete_firewall(self, firewall_id):
return self.delete(self.firewall_path % firewall_id)
@APIParamsCall
def reboot_firewall(self, firewall_id, body=None):
return self.post(self.firewall_path % firewall_id + "/reboot", body=body)
@APIParamsCall
def reset_password_firewall(self, firewall_id, body=None):
return self.post(self.firewall_path % firewall_id + "/reset_password", body=body)
@APIParamsCall
def list_firewall_interfaces(self, **_params):
return self.list('firewall_interfaces', self.firewall_interfaces_path, **_params)
@APIParamsCall
def show_firewall_interface(self, firewall_interface_id, **_params):
return self.get(self.firewall_interface_path % firewall_interface_id, params=_params)
@APIParamsCall
def update_firewall_interface(self, firewall_interface_id, body=None):
return self.put(self.firewall_interface_path % firewall_interface_id, body=body)
@APIParamsCall
def list_firewall_plans(self, **_params):
return self.list('firewall_plans', self.firewall_plans_path, **_params)
@APIParamsCall
def show_firewall_plan(self, firewall_plan_id, **_params):
return self.get(self.firewall_plan_path % firewall_plan_id, params=_params)
@APIParamsCall
def list_loadbalancers(self, **_params):
return self.list('load_balancers', self.loadbalancers_path, **_params)
@APIParamsCall
def create_loadbalancer(self, body=None):
return self.post(self.loadbalancers_path, body=body)
@APIParamsCall
def show_loadbalancer(self, loadbalancer_id, **_params):
return self.get(self.loadbalancer_path % loadbalancer_id, params=_params)
@APIParamsCall
def update_loadbalancer(self, loadbalancer_id, body=None):
return self.put(self.loadbalancer_path % loadbalancer_id, body=body)
@APIParamsCall
def delete_loadbalancer(self, loadbalancer_id):
return self.delete(self.loadbalancer_path % loadbalancer_id)
@APIParamsCall
def reboot_loadbalancer(self, loadbalancer_id, body=None):
return self.post(self.loadbalancer_path % loadbalancer_id + "/reboot", body=body)
@APIParamsCall
def list_loadbalancer_plans(self, **_params):
return self.list('load_balancer_plans', self.loadbalancer_plans_path, **_params)
@APIParamsCall
def create_loadbalancer_plan(self, body=None):
return self.post(self.loadbalancer_plans_path, body=body)
@APIParamsCall
def show_loadbalancer_plan(self, loadbalancerplan_id, **_params):
return self.get(self.loadbalancer_plan_path % loadbalancerplan_id, params=_params)
@APIParamsCall
def update_loadbalancer_plan(self, loadbalancerplan_id, body=None):
return self.put(self.loadbalancer_plan_path % loadbalancerplan_id, body=body)
@APIParamsCall
def delete_loadbalancer_plan(self, loadbalancerplan_id):
return self.delete(self.loadbalancer_plan_path % loadbalancerplan_id)
@APIParamsCall
def list_loadbalancer_interfaces(self, **_params):
return self.list('load_balancer_interfaces', self.loadbalancer_interfaces_path, **_params)
@APIParamsCall
def show_loadbalancer_interface(self, loadbalancer_interface_id, **_params):
return self.get(self.loadbalancer_interface_path % loadbalancer_interface_id, params=_params)
@APIParamsCall
def update_loadbalancer_interface(self, loadbalancer_interface_id, body=None):
return self.put(self.loadbalancer_interface_path % loadbalancer_interface_id, body=body)
@APIParamsCall
def reset_password_loadbalancer(self, loadbalancer_id, body=None):
return self.post(self.loadbalancer_path % loadbalancer_id + "/reset_password", body=body)
@APIParamsCall
def list_loadbalancer_syslog_servers(self, **_params):
return self.list('load_balancer_syslog_servers', self.loadbalancer_syslog_servers_path, **_params)
@APIParamsCall
def show_loadbalancer_syslog_server(self, loadbalancer_syslog_server_id, **_params):
return self.get(self.loadbalancer_syslog_server_path % loadbalancer_syslog_server_id, params=_params)
@APIParamsCall
def create_loadbalancer_syslog_server(self, body=None):
return self.post(self.loadbalancer_syslog_servers_path, body=body)
@APIParamsCall
def delete_loadbalancer_syslog_server(self, loadbalancer_syslog_server_id, body=None):
return self.delete(self.loadbalancer_syslog_server_path % loadbalancer_syslog_server_id, body=body)
@APIParamsCall
def update_loadbalancer_syslog_server(self, loadbalancer_syslog_server_id, body=None):
return self.put(self.loadbalancer_syslog_server_path % loadbalancer_syslog_server_id, body=body)
@APIParamsCall
def list_cfgws(self, **_params):
return self.list(self._cfgw_plural, self.common_function_gateways_path, **_params)
@APIParamsCall
def show_cfgw(self, cfgw_id, **_params):
return self.get(self.common_function_gateway_path % cfgw_id, params=_params)
@APIParamsCall
def create_cfgw(self, body=None):
return self.post(self.common_function_gateways_path, body=body)
@APIParamsCall
def update_cfgw(self, cfgw_id, body=None):
return self.put(self.common_function_gateway_path % cfgw_id, body=body)
@APIParamsCall
def delete_cfgw(self, cfgw_id):
return self.delete(self.common_function_gateway_path % cfgw_id)
@APIParamsCall
def list_common_function_pools(self, **_params):
return self.list(self._common_function_pool_plural,
self.common_function_pools_path, **_params)
@APIParamsCall
def show_common_function_pool(self, cfp_id, **_params):
return self.get(self.common_function_pool_path % cfp_id, params=_params)
@APIParamsCall
def list_common_functions(self, **_params):
return self.list(self._common_function_plural,
self.common_functions_path, **_params)
@APIParamsCall
def show_common_function(self, cfp_id, **_params):
return self.get(self.common_function_path % cfp_id, params=_params)
@APIParamsCall
def list_reserve_addresses(self, **_params):
return self.list('reserve_addresses', self.reserve_addresses_path, **_params)
@APIParamsCall
def show_reserve_address(self, tenant_id, **_params):
return self.get(self.reserve_address_path % tenant_id, params=_params)
@APIParamsCall
def list_colo_logical_links(self, **_params):
return self.list(self._colo_logical_link_plural,
self.colocation_logical_links_path, **_params)
@APIParamsCall
def show_colo_logical_link(self, colo_logical_link_id, **_params):
return self.get(
self.colocation_logical_link_path % colo_logical_link_id,
params=_params)
@APIParamsCall
def create_colo_logical_link(self, body=None):
return self.post(self.colocation_logical_links_path,
body=body)
@APIParamsCall
def update_colo_logical_link(self, colo_logical_link_id, body=None):
return self.put(self.colocation_logical_link_path %
colo_logical_link_id, body=body)
@APIParamsCall
def delete_colo_logical_link(self, colo_logical_link_id):
return self.delete(self.colocation_logical_link_path %
colo_logical_link_id)
@APIParamsCall
def list_colo_physical_links(self, **_params):
return self.list(self._colo_physical_link_plural,
self.colocation_physical_links_path, **_params)
@APIParamsCall
def show_colo_physical_link(self, colo_physical_link_id, **_params):
return self.get(
self.colocation_physical_link_path % colo_physical_link_id,
params=_params)
@APIParamsCall
def list_colo_spaces(self, **_params):
return self.list(self._colo_space_plural,
self.colocation_spaces_path, **_params)
@APIParamsCall
def show_colo_space(self, colo_space_id, **_params):
return self.get(
self.colocation_space_path % colo_space_id,
params=_params)
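# A minimal usage sketch (hypothetical wiring; the client construction lives
# elsewhere in this module). Each helper maps one-to-one onto a REST call, and
# create/update bodies use the {resource_name: {...attributes...}} envelope:
#
#   client = Client(session)  # assumed constructor
#   body = {'static_route': {'destination': '10.0.0.0/24',
#                            'nexthop': '192.168.1.1'}}
#   route = client.create_static_route(body=body)['static_route']
#   client.delete_static_route(route['id'])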
|
|
from itertools import product
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.common.exceptions as com
clickhouse_driver = pytest.importorskip("clickhouse_driver")
@pytest.fixture(scope='module')
def diamonds(con):
return con.table('diamonds')
@pytest.fixture(scope='module')
def batting(con):
return con.table('batting')
@pytest.fixture(scope='module')
def awards_players(con):
return con.table('awards_players')
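# The remaining fixtures used below (con, db, alltypes, df, translate) are
# assumed to be provided by this backend's conftest.py, per pytest convention.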
def test_timestamp_extract_field(con, db, alltypes):
t = alltypes.timestamp_col
expr = alltypes[
t.year().name('year'),
t.month().name('month'),
t.day().name('day'),
t.hour().name('hour'),
t.minute().name('minute'),
t.second().name('second'),
]
result = ibis.clickhouse.compile(expr)
expected = """\
SELECT toYear(`timestamp_col`) AS `year`, toMonth(`timestamp_col`) AS `month`,
toDayOfMonth(`timestamp_col`) AS `day`,
toHour(`timestamp_col`) AS `hour`,
toMinute(`timestamp_col`) AS `minute`,
toSecond(`timestamp_col`) AS `second`
FROM {0}.`functional_alltypes`"""
assert result == expected.format(db.name)
def test_isin_notin_in_select(con, db, alltypes, translate):
values = {'foo', 'bar'}
filtered = alltypes[alltypes.string_col.isin(values)]
result = ibis.clickhouse.compile(filtered)
expected = """SELECT *
FROM {}.`functional_alltypes`
WHERE `string_col` IN {}"""
assert result == expected.format(db.name, tuple(values))
filtered = alltypes[alltypes.string_col.notin(values)]
result = ibis.clickhouse.compile(filtered)
expected = """SELECT *
FROM {}.`functional_alltypes`
WHERE `string_col` NOT IN {}"""
assert result == expected.format(db.name, tuple(values))
def test_head(alltypes):
result = alltypes.head().execute()
expected = alltypes.limit(5).execute()
tm.assert_frame_equal(result, expected)
def test_limit_offset(alltypes):
expected = alltypes.execute()
tm.assert_frame_equal(alltypes.limit(4).execute(), expected.head(4))
tm.assert_frame_equal(alltypes.limit(8).execute(), expected.head(8))
tm.assert_frame_equal(
alltypes.limit(4, offset=2).execute(),
expected.iloc[2:6].reset_index(drop=True),
)
def test_subquery(alltypes, df):
t = alltypes
expr = t.mutate(d=t.double_col).limit(1000).group_by('string_col').size()
result = expr.execute()
result = result.sort_values('string_col').reset_index(drop=True)
expected = (
df.assign(d=df.double_col.fillna(0))
.head(1000)
.groupby('string_col')
.string_col.count()
.reset_index(name='count')
.sort_values('string_col')
.reset_index(drop=True)
)
result['count'] = result['count'].astype('int64')
tm.assert_frame_equal(result, expected)
def test_simple_scalar_aggregates(db, alltypes):
# Things like table.column.{sum, mean, ...}()
table = alltypes
expr = table[table.int_col > 0].float_col.sum()
sql_query = ibis.clickhouse.compile(expr)
expected = """SELECT sum(`float_col`) AS `sum`
FROM {0}.`functional_alltypes`
WHERE `int_col` > 0"""
assert sql_query == expected.format(db.name)
# def test_scalar_aggregates_multiple_tables(alltypes):
# # #740
# table = ibis.table([('flag', 'string'),
# ('value', 'double')],
# 'tbl')
# flagged = table[table.flag == '1']
# unflagged = table[table.flag == '0']
# expr = flagged.value.mean() / unflagged.value.mean() - 1
# result = ibis.clickhouse.compile(expr)
# expected = """\
# SELECT (t0.`mean` / t1.`mean`) - 1 AS `tmp`
# FROM (
# SELECT avg(`value`) AS `mean`
# FROM tbl
# WHERE `flag` = '1'
# ) t0
# CROSS JOIN (
# SELECT avg(`value`) AS `mean`
# FROM tbl
# WHERE `flag` = '0'
# ) t1"""
# assert result == expected
# fv = flagged.value
# uv = unflagged.value
# expr = (fv.mean() / fv.sum()) - (uv.mean() / uv.sum())
# result = ibis.clickhouse.compile(expr)
# expected = """\
# SELECT t0.`tmp` - t1.`tmp` AS `tmp`
# FROM (
# SELECT avg(`value`) / sum(`value`) AS `tmp`
# FROM tbl
# WHERE `flag` = '1'
# ) t0
# CROSS JOIN (
# SELECT avg(`value`) / sum(`value`) AS `tmp`
# FROM tbl
# WHERE `flag` = '0'
# ) t1"""
# assert result == expected
# TODO use alltypes
def test_table_column_unbox(db, alltypes):
m = alltypes.float_col.sum().name('total')
agged = (
alltypes[alltypes.int_col > 0].group_by('string_col').aggregate([m])
)
expr = agged.string_col
sql_query = ibis.clickhouse.compile(expr)
expected = """\
SELECT `string_col`
FROM (
SELECT `string_col`, sum(`float_col`) AS `total`
FROM {0}.`functional_alltypes`
WHERE `int_col` > 0
GROUP BY `string_col`
) t0"""
assert sql_query == expected.format(db.name)
def test_complex_array_expr_projection(db, alltypes):
# May require finding the base table and forming a projection.
expr = alltypes.group_by('string_col').aggregate(
[alltypes.count().name('count')]
)
expr2 = expr.string_col.cast('double')
query = ibis.clickhouse.compile(expr2)
expected = """SELECT CAST(`string_col` AS Float64) AS `tmp`
FROM (
SELECT `string_col`, count(*) AS `count`
FROM {0}.`functional_alltypes`
GROUP BY `string_col`
) t0"""
assert query == expected.format(db.name)
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.now(), 'SELECT now() AS `tmp`'),
(ibis.literal(1) + ibis.literal(2), 'SELECT 1 + 2 AS `tmp`'),
],
)
def test_scalar_exprs_no_table_refs(expr, expected):
assert ibis.clickhouse.compile(expr) == expected
# TODO: use alltypes
def test_isnull_case_expr_rewrite_failure(db, alltypes):
# #172, case expression that was not being properly converted into an
# aggregation
reduction = alltypes.string_col.isnull().ifelse(1, 0).sum()
result = ibis.clickhouse.compile(reduction)
expected = """\
SELECT sum(CASE WHEN isNull(`string_col`) THEN 1 ELSE 0 END) AS `sum`
FROM {0}.`functional_alltypes`"""
assert result == expected.format(db.name)
# def test_nameless_table(con):
# # Generate a unique table name when we haven't passed on
# nameless = con.table([('key', 'string')])
# assert ibis.clickhouse.compile(nameless) == 'SELECT *\nFROM {}'.format(
# nameless.op().name
# )
# with_name = con.table([('key', 'string')], name='baz')
# result = ibis.clickhouse.compile(with_name)
# assert result == 'SELECT *\nFROM baz'
def test_physical_table_reference_translate(db, alltypes):
# If all of an expression's table leaves reference database tables, verify
# we translate correctly
sql_string = ibis.clickhouse.compile(alltypes)
expected = "SELECT *\nFROM {0}.`functional_alltypes`"
assert sql_string == expected.format(db.name)
def test_non_equijoin(alltypes):
t = alltypes.limit(100)
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
with pytest.raises(com.TranslationError):
expr.execute()
@pytest.mark.parametrize(
('join_type_and_clause', 'join_keys'),
product(
[
('any_inner_join', 'ANY INNER JOIN'),
('inner_join', 'ALL INNER JOIN'),
('any_left_join', 'ANY LEFT OUTER JOIN'),
('left_join', 'ALL LEFT OUTER JOIN'),
],
[
('playerID', 'playerID'),
('playerID', 'awardID'),
], # noqa: E231
),
)
def test_simple_joins(
con, db, batting, awards_players, join_type_and_clause, join_keys
):
join_type, join_clause = join_type_and_clause
t1, t2 = batting, awards_players
pred = [t1[join_keys[0]] == t2[join_keys[1]]]
join_keys_str = f' ON t0.`{join_keys[0]}` = t1.`{join_keys[1]}`'
expr = getattr(t1, join_type)(t2, pred)[[t1]]
expected = (
'SELECT t0.*\n'
f'FROM {db.name}.`batting` t0\n'
f' {join_clause} {db.name}.`awards_players` t1\n'
f'{join_keys_str}'
)
assert ibis.clickhouse.compile(expr) == expected
con.execute(expr)
def test_self_reference_simple(con, db, alltypes):
expr = alltypes.view()
result_sql = ibis.clickhouse.compile(expr)
expected_sql = "SELECT *\nFROM {0}.`functional_alltypes`"
assert result_sql == expected_sql.format(db.name)
assert len(con.execute(expr))
def test_join_self_reference(con, db, alltypes):
t1 = alltypes
t2 = t1.view()
expr = t1.any_inner_join(t2, ['id'])[[t1]]
result_sql = ibis.clickhouse.compile(expr)
expected_sql = (
'SELECT t0.*\n'
f'FROM {db.name}.`functional_alltypes` t0\n'
f' ANY INNER JOIN {db.name}.`functional_alltypes` t1\n'
' ON t0.`id` = t1.`id`'
)
assert result_sql == expected_sql
assert len(con.execute(expr))
def test_where_simple_comparisons(con, db, alltypes):
t1 = alltypes
expr = t1.filter([t1.float_col > 0, t1.int_col < t1.float_col * 2])
result = ibis.clickhouse.compile(expr)
expected = """SELECT *
FROM {0}.`functional_alltypes`
WHERE (`float_col` > 0) AND
(`int_col` < (`float_col` * 2))"""
assert result == expected.format(db.name)
assert len(con.execute(expr))
def test_where_with_between(con, db, alltypes):
t = alltypes
expr = t.filter([t.int_col > 0, t.float_col.between(0, 1)])
result = ibis.clickhouse.compile(expr)
expected = """SELECT *
FROM {0}.`functional_alltypes`
WHERE (`int_col` > 0) AND
(`float_col` BETWEEN 0 AND 1)"""
assert result == expected.format(db.name)
con.execute(expr)
def test_where_use_if(con, alltypes, translate):
expr = ibis.where(
alltypes.float_col > 0, alltypes.int_col, alltypes.bigint_col
)
result = translate(expr)
expected = "if(`float_col` > 0, `int_col`, `bigint_col`)"
assert result == expected
con.execute(expr)
@pytest.mark.xfail(
raises=com.RelationError, reason='Expression equality is broken'
)
def test_filter_predicates(diamonds):
predicates = [
lambda x: x.color.lower().like('%de%'),
# lambda x: x.color.lower().contains('de'),
lambda x: x.color.lower().rlike('.*ge.*'),
]
expr = diamonds
for pred in predicates:
expr = expr[pred(expr)].projection([expr])
expr.execute()
def test_where_with_timestamp():
t = ibis.table(
[('uuid', 'string'), ('ts', 'timestamp'), ('search_level', 'int64')],
name='t',
)
expr = t.group_by(t.uuid).aggregate(
min_date=t.ts.min(where=t.search_level == 1)
)
result = ibis.clickhouse.compile(expr)
expected = """\
SELECT `uuid`, minIf(`ts`, `search_level` = 1) AS `min_date`
FROM t
GROUP BY `uuid`"""
assert result == expected
def test_timestamp_scalar_in_filter(alltypes, translate):
table = alltypes
expr = table.filter(
[
table.timestamp_col
< (ibis.timestamp('2010-01-01') + ibis.interval(weeks=3)),
table.timestamp_col < (ibis.now() + ibis.interval(days=10)),
]
).count()
expr.execute()
def test_named_from_filter_groupby():
t = ibis.table([('key', 'string'), ('value', 'double')], name='t0')
gb = t.filter(t.value == 42).groupby(t.key)
sum_expr = lambda t: (t.value + 1 + 2 + 3).sum() # noqa: E731
expr = gb.aggregate(abc=sum_expr)
expected = """\
SELECT `key`, sum(((`value` + 1) + 2) + 3) AS `abc`
FROM t0
WHERE `value` = 42
GROUP BY `key`"""
assert ibis.clickhouse.compile(expr) == expected
expr = gb.aggregate(foo=sum_expr)
expected = """\
SELECT `key`, sum(((`value` + 1) + 2) + 3) AS `foo`
FROM t0
WHERE `value` = 42
GROUP BY `key`"""
assert ibis.clickhouse.compile(expr) == expected
def test_join_with_external_table_errors(con, alltypes, df):
external_table = ibis.table(
[('a', 'string'), ('b', 'int64'), ('c', 'string')], name='external'
)
alltypes = alltypes.mutate(b=alltypes.tinyint_col)
expr = alltypes.inner_join(external_table, ['b'])[
external_table.a, external_table.c, alltypes.id
]
with pytest.raises(clickhouse_driver.errors.ServerException):
expr.execute()
with pytest.raises(TypeError):
expr.execute(external_tables={'external': []})
def test_join_with_external_table(con, alltypes, df):
external_df = pd.DataFrame(
[('alpha', 1, 'first'), ('beta', 2, 'second'), ('gamma', 3, 'third')],
columns=['a', 'b', 'c'],
)
external_df['b'] = external_df['b'].astype('int8')
external_table = ibis.table(
[('a', 'string'), ('b', 'int64'), ('c', 'string')], name='external'
)
alltypes = alltypes.mutate(b=alltypes.tinyint_col)
expr = alltypes.inner_join(external_table, ['b'])[
external_table.a, external_table.c, alltypes.id
]
result = expr.execute(external_tables={'external': external_df})
expected = df.assign(b=df.tinyint_col).merge(external_df, on='b')[
['a', 'c', 'id']
]
result = result.sort_values('id').reset_index(drop=True)
expected = expected.sort_values('id').reset_index(drop=True)
tm.assert_frame_equal(result, expected, check_column_type=False)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
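# These tests follow the holodeck pattern used throughout this suite: mock a
# 500 response to force a TwilioException and assert the request shape, then
# mock a realistic payload to exercise response deserialization.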
class ApplicationTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications.create()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Applications.json',
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": "Mon, 22 Aug 2011 20:59:45 +0000",
"date_updated": "Tue, 18 Aug 2015 16:48:57 +0000",
"friendly_name": "Application Friendly Name",
"message_status_callback": "http://www.example.com/sms-status-callback",
"sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "GET",
"sms_fallback_url": "http://www.example.com/sms-fallback",
"sms_method": "GET",
"sms_status_callback": "http://www.example.com/sms-status-callback",
"sms_url": "http://example.com",
"status_callback": "http://example.com",
"status_callback_method": "GET",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications/APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_caller_id_lookup": false,
"voice_fallback_method": "GET",
"voice_fallback_url": "http://www.example.com/voice-callback",
"voice_method": "GET",
"voice_url": "http://example.com"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications.create()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Applications/APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Applications/APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": "Mon, 22 Aug 2011 20:59:45 +0000",
"date_updated": "Tue, 18 Aug 2015 16:48:57 +0000",
"friendly_name": "Application Friendly Name",
"message_status_callback": "http://www.example.com/sms-status-callback",
"sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "GET",
"sms_fallback_url": "http://www.example.com/sms-fallback",
"sms_method": "GET",
"sms_status_callback": "http://www.example.com/sms-status-callback",
"sms_url": "http://example.com",
"status_callback": "http://example.com",
"status_callback_method": "GET",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications/APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_caller_id_lookup": false,
"voice_fallback_method": "GET",
"voice_fallback_url": "http://www.example.com/voice-callback",
"voice_method": "GET",
"voice_url": "http://example.com"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Applications.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"applications": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": "Fri, 21 Aug 2015 00:07:25 +0000",
"date_updated": "Fri, 21 Aug 2015 00:07:25 +0000",
"friendly_name": "d8821fb7-4d01-48b2-bdc5-34e46252b90b",
"message_status_callback": null,
"sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "POST",
"sms_fallback_url": null,
"sms_method": "POST",
"sms_status_callback": null,
"sms_url": null,
"status_callback": null,
"status_callback_method": "POST",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications/APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_caller_id_lookup": false,
"voice_fallback_method": "POST",
"voice_fallback_url": null,
"voice_method": "POST",
"voice_url": null
}
],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json?PageSize=1&Page=0",
"next_page_uri": null,
"previous_page_uri": null,
"page_size": 1,
"page": 0,
"start": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json?PageSize=1&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"applications": [],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json?PageSize=1&Page=0",
"previous_page_uri": null,
"page_size": 1,
"start": 0,
"next_page_uri": null,
"page": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json?PageSize=1&Page=0"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Applications/APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": "Mon, 22 Aug 2011 20:59:45 +0000",
"date_updated": "Tue, 18 Aug 2015 16:48:57 +0000",
"friendly_name": "Application Friendly Name",
"message_status_callback": "http://www.example.com/sms-status-callback",
"sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "GET",
"sms_fallback_url": "http://www.example.com/sms-fallback",
"sms_method": "GET",
"sms_status_callback": "http://www.example.com/sms-status-callback",
"sms_url": "http://example.com",
"status_callback": "http://example.com",
"status_callback_method": "GET",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications/APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_caller_id_lookup": false,
"voice_fallback_method": "GET",
"voice_fallback_url": "http://www.example.com/voice-callback",
"voice_method": "GET",
"voice_url": "http://example.com"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.applications(sid="APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
|
|
# coding=utf-8
'''
unregistered setup:
1. at .do creation, a unique hash ID from SITE is used, so the ID is per-project
2. any access to SITE using that ID is granted to anonymous users
registered setup:
0. a user can secure their IDs by applying their hash (username+pass) in config
1. same as unregistered.1
2. any access to SITE using that ID is controlled by the user
flow:
- each http request addresses a projects/tasks repository by hash (public)
OR by username (secured)
- each repository holds a number of projects
public rep:
- the repository is initialised by requesting a name from the server (at config)
- the repository can be accessed anonymously, for both reading and writing.
*The ONLY protection against intrusion so far is hash complexity (xx-byte?)
- a project is accessed by specifying the repository hash and the project name within the rep
secured rep:
- the repository is initialised by creating it on the server / requesting it while logged in
- the repository is accessible to its owner only (by default)
- repository access can be widened by the owner
either way:
- the task editor name is given either as plain text (anon) or as the logged-in user id
'''
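# Illustrative request shapes implied by the flow above (field names mirror
# the methods below; the values are made up):
#
#   public repository:  {'rep': '<hash>', 'project': 'myproj', 'logName': 'anon'}
#   secured repository: {'rep': '<hash>', 'project': 'myproj',
#                        'logName': 'alice', 'logPass': '<password hash>'}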
import sys, json, encodings.idna
if sys.version < '3':
import urllib2, urllib
else:
import urllib.request as urllib2
import urllib.parse as urllib
if sys.version < '3':
from task import *
from c import *
else:
from .task import *
from .c import *
class TodoDbHttp():
name= 'Http'
lastId= None
settings= None
parentDB= False
dbId= None
timeout= 10
def __init__(self, _parentDB, _settings, _dbId):
self.settings= _settings
self.parentDB= _parentDB
self.dbId= _dbId
def flush(self):
postData= {}
postList= list()
postTodoA= {}
for iT in self.parentDB.todoA:
curTodo= self.parentDB.todoA[iT]
if not curTodo.savePending(self.dbId):
continue
curTodo.setSaved(SAVE_STATES.HOLD, self.dbId) # hold so no other backend picks this todo up for saving meanwhile
for cState in STATE_LIST:
if cState and cState[0]==curTodo.state:
break
postList.append(str(curTodo.id))
postTodoA['state' +str(curTodo.id)]= (cState or STATE_DEFAULT)[1]
postTodoA['file' +str(curTodo.id)]= curTodo.fileName
postTodoA['tags' +str(curTodo.id)]= ','.join(curTodo.tagsA)
postTodoA['lvl' +str(curTodo.id)]= curTodo.lvl
postTodoA['comm' +str(curTodo.id)]= curTodo.comment
postTodoA['stamp' +str(curTodo.id)]= curTodo.stamp
if not len(postList):
return True
postTodoA['ids']= ','.join(postList)
postData['v']= 1
postData['todoa']= json.dumps(postTodoA)
postData['logName']= urllib2.quote(self.parentDB.config.projectUser)
if self.settings.login!='' and self.settings.password!='':
postData['logName']= urllib2.quote(self.settings.login)
postData['logPass']= urllib2.quote(self.settings.password)
postData['rep']= self.settings.repository
postData['project']= urllib2.quote(self.settings.fullProject)
response= self.callHTTP('?=flush', postData)
if response==None:
return False
if response=='':
print('TypeTodo: HTTP server returned an unexpected result. Repository: ' +self.settings.repository)
return False
allOk= True
response= json.loads(response)
for respId in response:
if int(respId) not in self.parentDB.todoA: # guard before indexing to avoid a KeyError
print('TypeTodo: Server responded with task ' +respId +' that doesn\'t exist. Skipping')
continue
curTodo= self.parentDB.todoA[int(respId)]
if response[respId]!=0:
print('TypeTodo: Task ' +respId +' was not saved. Error returned: ' +str(response[respId]))
allOk= False
else:
if curTodo.saveProgress(self.dbId): # a todo edited during the save will not become idle here
curTodo.setSaved(SAVE_STATES.IDLE, self.dbId)
return allOk
def newId(self, _wantedId=0):
if _wantedId==self.lastId:
return self.lastId
postData= {}
postData['wantedId']= _wantedId
postData['logName']= urllib2.quote(self.parentDB.config.projectUser)
if self.settings.login!='' and self.settings.password!='':
postData['logName']= urllib2.quote(self.settings.login)
postData['logPass']= urllib2.quote(self.settings.password)
postData['rep']= self.settings.repository
postData['project']= urllib2.quote(self.settings.fullProject)
response= self.callHTTP('?=new_task_id', postData)
if response==None:
return False
if not response.isdigit(): # a non-numeric reply means the server failed to create an id
print('TypeTodo: HTTP server failed creating todo')
return False
self.lastId= int(response)
return self.lastId
def releaseId(self, _atExit=False):
postData= {}
postData['wantedId']= self.lastId
postData['logName']= urllib2.quote(self.parentDB.config.projectUser)
if self.settings.login!='' and self.settings.password!='':
postData['logName']= urllib2.quote(self.settings.login)
postData['logPass']= urllib2.quote(self.settings.password)
postData['rep']= self.settings.repository
postData['project']= urllib2.quote(self.settings.fullProject)
response= self.callHTTP('?=release_task_id', postData)
if response==None:
return False
response= response or '0'
if not response.isdigit(): # a non-numeric reply means the server failed to release the id
print('TypeTodo: HTTP server failed releasing todo')
return False
return response
def fetch(self):
postData= {}
postData['rep']= self.settings.repository
postData['project']= urllib2.quote(self.settings.fullProject)
if self.settings.login!='' and self.settings.password!='':
postData['logName']= urllib2.quote(self.settings.login)
postData['logPass']= urllib2.quote(self.settings.password)
response= self.callHTTP('?=fetch_tasks', postData)
if response==None:
return False
todoA= {}
for task in json.loads(response):
__id= int(task['id'])
if __id not in todoA:
todoA[__id]= TodoTask(__id, self.parentDB.config.projectName, self.parentDB)
fetchedStateName= task['namestate']
for cState in STATE_LIST:
if cState and cState[1]==fetchedStateName:
break
tags= task['nametag'].split(',')
todoA[__id].set((cState or STATE_DEFAULT)[0], tags, task['priority'], task['namefile'], task['comment'], task['nameuser'], int(task['ustamp']))
return todoA
lastServerState= True # assume the server is reachable until a request fails
'''
_url
str: path from root
_post
dict: POST data
'''
def callHTTP(self, _url, _post):
req= urllib2.Request('http://%s/%s' % (self.settings.host, _url), str.encode(urllib.urlencode(_post)))
try:
response= bytes.decode( urllib2.urlopen(req, None, self.timeout).read() )
except Exception as e:
if self.lastServerState:
print('TypeTodo: HTTP connection unavailable.')
self.lastServerState= False
return
if not self.lastServerState:
print('TypeTodo: HTTP connection restored.')
self.lastServerState= True
return response
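# Rough usage sketch for callHTTP (endpoint and fields mirror the callers
# above; the construction arguments are assumed):
#
#   db = TodoDbHttp(parent_db, settings, db_id)
#   raw = db.callHTTP('?=fetch_tasks', {'rep': settings.repository,
#                                       'project': settings.fullProject})
#   tasks = json.loads(raw) if raw else []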
|
|
"""The tests for the hassio component."""
from datetime import timedelta
import os
from unittest.mock import patch
import pytest
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components import frontend
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.hassio import ADDONS_COORDINATOR, DOMAIN, STORAGE_KEY
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.helpers.device_registry import async_get
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
MOCK_ENVIRON = {"HASSIO": "127.0.0.1", "HASSIO_TOKEN": "abcdefgh"}
@pytest.fixture(autouse=True)
def mock_all(aioclient_mock, request):
"""Mock all setup requests."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
aioclient_mock.get(
"http://127.0.0.1/info",
json={
"result": "ok",
"data": {"supervisor": "222", "homeassistant": "0.110.0", "hassos": None},
},
)
aioclient_mock.get(
"http://127.0.0.1/store",
json={
"result": "ok",
"data": {"addons": [], "repositories": []},
},
)
aioclient_mock.get(
"http://127.0.0.1/host/info",
json={
"result": "ok",
"data": {
"result": "ok",
"data": {
"chassis": "vm",
"operating_system": "Debian GNU/Linux 10 (buster)",
"kernel": "4.19.0-6-amd64",
},
},
},
)
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={"result": "ok", "data": {"version_latest": "1.0.0"}},
)
aioclient_mock.get(
"http://127.0.0.1/os/info",
json={"result": "ok", "data": {"version_latest": "1.0.0"}},
)
aioclient_mock.get(
"http://127.0.0.1/supervisor/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0"},
"addons": [
{
"name": "test",
"slug": "test",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "core",
"url": "https://github.com/home-assistant/addons/test",
},
{
"name": "test2",
"slug": "test2",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "core",
"url": "https://github.com",
},
],
},
)
aioclient_mock.get(
"http://127.0.0.1/ingress/panels", json={"result": "ok", "data": {"panels": {}}}
)
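# Note: the `call_count == 10` assertions below reflect one hit per endpoint
# mocked above during setup; adding an endpoint to this fixture shifts those
# expected counts (an observation about the mocks, not a documented contract).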
async def test_setup_api_ping(hass, aioclient_mock):
"""Test setup with API ping."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {})
assert result
assert aioclient_mock.call_count == 10
assert hass.components.hassio.get_core_info()["version_latest"] == "1.0.0"
assert hass.components.hassio.is_hassio()
async def test_setup_api_panel(hass, aioclient_mock):
"""Test setup with API ping."""
assert await async_setup_component(hass, "frontend", {})
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {})
assert result
panels = hass.data[frontend.DATA_PANELS]
assert panels.get("hassio").to_response() == {
"component_name": "custom",
"icon": "hass:home-assistant",
"title": "Supervisor",
"url_path": "hassio",
"require_admin": True,
"config": {
"_panel_custom": {
"embed_iframe": True,
"js_url": "/api/hassio/app/entrypoint.js",
"name": "hassio-main",
"trust_external": False,
}
},
}
async def test_setup_api_push_api_data(hass, aioclient_mock):
"""Test setup with API push."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(
hass, "hassio", {"http": {"server_port": 9999}, "hassio": {}}
)
assert result
assert aioclient_mock.call_count == 10
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 9999
assert aioclient_mock.mock_calls[1][2]["watchdog"]
async def test_setup_api_push_api_data_server_host(hass, aioclient_mock):
"""Test setup with API push with active server host."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(
hass,
"hassio",
{"http": {"server_port": 9999, "server_host": "127.0.0.1"}, "hassio": {}},
)
assert result
assert aioclient_mock.call_count == 10
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 9999
assert not aioclient_mock.mock_calls[1][2]["watchdog"]
async def test_setup_api_push_api_data_default(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert aioclient_mock.call_count == 10
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 8123
refresh_token = aioclient_mock.mock_calls[1][2]["refresh_token"]
hassio_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]["data"]["hassio_user"]
)
assert hassio_user is not None
assert hassio_user.system_generated
assert len(hassio_user.groups) == 1
assert hassio_user.groups[0].id == GROUP_ID_ADMIN
assert hassio_user.name == "Supervisor"
for token in hassio_user.refresh_tokens.values():
if token.token == refresh_token:
break
else:
assert False, "refresh token not found"
async def test_setup_adds_admin_group_to_user(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
# Create user without admin
user = await hass.auth.async_create_system_user("Hass.io")
assert not user.is_admin
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
"data": {"hassio_user": user.id},
"key": STORAGE_KEY,
"version": 1,
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert user.is_admin
async def test_setup_migrate_user_name(hass, aioclient_mock, hass_storage):
"""Test setup with migrating the user name."""
# Create user with old name
user = await hass.auth.async_create_system_user("Hass.io")
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
"data": {"hassio_user": user.id},
"key": STORAGE_KEY,
"version": 1,
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert user.name == "Supervisor"
async def test_setup_api_existing_hassio_user(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
user = await hass.auth.async_create_system_user("Hass.io test")
token = await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"hassio_user": user.id}}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert aioclient_mock.call_count == 10
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 8123
assert aioclient_mock.mock_calls[1][2]["refresh_token"] == token.token
async def test_setup_core_push_timezone(hass, aioclient_mock):
"""Test setup with API push default data."""
hass.config.time_zone = "testzone"
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"hassio": {}})
assert result
assert aioclient_mock.call_count == 10
assert aioclient_mock.mock_calls[2][2]["timezone"] == "testzone"
with patch("homeassistant.util.dt.set_default_time_zone"):
await hass.config.async_update(time_zone="America/New_York")
await hass.async_block_till_done()
assert aioclient_mock.mock_calls[-1][2]["timezone"] == "America/New_York"
async def test_setup_hassio_no_additional_data(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
result = await async_setup_component(hass, "hassio", {"hassio": {}})
assert result
assert aioclient_mock.call_count == 10
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
async def test_fail_setup_without_environ_var(hass):
"""Fail setup if no environ variable set."""
with patch.dict(os.environ, {}, clear=True):
result = await async_setup_component(hass, "hassio", {})
assert not result
async def test_warn_when_cannot_connect(hass, caplog):
"""Fail warn when we cannot connect."""
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=None,
):
result = await async_setup_component(hass, "hassio", {})
assert result
assert hass.components.hassio.is_hassio()
assert "Not connected with the supervisor / system too busy!" in caplog.text
async def test_service_register(hassio_env, hass):
"""Check if service will be setup."""
assert await async_setup_component(hass, "hassio", {})
assert hass.services.has_service("hassio", "addon_start")
assert hass.services.has_service("hassio", "addon_stop")
assert hass.services.has_service("hassio", "addon_restart")
assert hass.services.has_service("hassio", "addon_update")
assert hass.services.has_service("hassio", "addon_stdin")
assert hass.services.has_service("hassio", "host_shutdown")
assert hass.services.has_service("hassio", "host_reboot")
assert hass.services.has_service("hassio", "host_reboot")
assert hass.services.has_service("hassio", "backup_full")
assert hass.services.has_service("hassio", "backup_partial")
assert hass.services.has_service("hassio", "restore_full")
assert hass.services.has_service("hassio", "restore_partial")
async def test_service_calls(hassio_env, hass, aioclient_mock, caplog):
"""Call service and check the API calls behind that."""
assert await async_setup_component(hass, "hassio", {})
aioclient_mock.post("http://127.0.0.1/addons/test/start", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/stop", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/restart", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/update", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/stdin", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/host/shutdown", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/host/reboot", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/backups/new/full", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/backups/new/partial", json={"result": "ok"})
aioclient_mock.post(
"http://127.0.0.1/backups/test/restore/full", json={"result": "ok"}
)
aioclient_mock.post(
"http://127.0.0.1/backups/test/restore/partial", json={"result": "ok"}
)
await hass.services.async_call("hassio", "addon_start", {"addon": "test"})
await hass.services.async_call("hassio", "addon_stop", {"addon": "test"})
await hass.services.async_call("hassio", "addon_restart", {"addon": "test"})
await hass.services.async_call("hassio", "addon_update", {"addon": "test"})
await hass.services.async_call(
"hassio", "addon_stdin", {"addon": "test", "input": "test"}
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 8
assert aioclient_mock.mock_calls[-1][2] == "test"
await hass.services.async_call("hassio", "host_shutdown", {})
await hass.services.async_call("hassio", "host_reboot", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 10
await hass.services.async_call("hassio", "backup_full", {})
await hass.services.async_call(
"hassio",
"backup_partial",
{"addons": ["test"], "folders": ["ssl"], "password": "123456"},
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 12
assert aioclient_mock.mock_calls[-1][2] == {
"addons": ["test"],
"folders": ["ssl"],
"password": "123456",
}
await hass.services.async_call("hassio", "restore_full", {"slug": "test"})
await hass.async_block_till_done()
await hass.services.async_call(
"hassio",
"restore_partial",
{
"slug": "test",
"homeassistant": False,
"addons": ["test"],
"folders": ["ssl"],
"password": "123456",
},
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 14
assert aioclient_mock.mock_calls[-1][2] == {
"addons": ["test"],
"folders": ["ssl"],
"homeassistant": False,
"password": "123456",
}
async def test_service_calls_core(hassio_env, hass, aioclient_mock):
"""Call core service and check the API calls behind that."""
assert await async_setup_component(hass, "hassio", {})
aioclient_mock.post("http://127.0.0.1/homeassistant/restart", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/homeassistant/stop", json={"result": "ok"})
await hass.services.async_call("homeassistant", "stop")
await hass.async_block_till_done()
assert aioclient_mock.call_count == 4
await hass.services.async_call("homeassistant", "check_config")
await hass.async_block_till_done()
assert aioclient_mock.call_count == 4
with patch(
"homeassistant.config.async_check_ha_config_file", return_value=None
) as mock_check_config:
await hass.services.async_call("homeassistant", "restart")
await hass.async_block_till_done()
assert mock_check_config.called
assert aioclient_mock.call_count == 5
async def test_entry_load_and_unload(hass):
"""Test loading and unloading config entry."""
with patch.dict(os.environ, MOCK_ENVIRON):
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert SENSOR_DOMAIN in hass.config.components
assert BINARY_SENSOR_DOMAIN in hass.config.components
assert ADDONS_COORDINATOR in hass.data
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert ADDONS_COORDINATOR not in hass.data
async def test_migration_off_hassio(hass):
"""Test that when a user moves instance off Hass.io, config entry gets cleaned up."""
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.config_entries.async_entries(DOMAIN) == []
async def test_device_registry_calls(hass):
"""Test device registry entries for hassio."""
dev_reg = async_get(hass)
supervisor_mock_data = {
"addons": [
{
"name": "test",
"slug": "test",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "test",
"url": "https://github.com/home-assistant/addons/test",
},
{
"name": "test2",
"slug": "test2",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
]
}
os_mock_data = {
"board": "odroid-n2",
"boot": "A",
"update_available": False,
"version": "5.12",
"version_latest": "5.12",
}
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
):
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(dev_reg.devices) == 3
supervisor_mock_data = {
"addons": [
{
"name": "test2",
"slug": "test2",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
]
}
# Test that when addon is removed, next update will remove the add-on and subsequent updates won't
with patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
):
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=1))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 2
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=2))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 2
supervisor_mock_data = {
"addons": [
{
"name": "test2",
"slug": "test2",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
{
"name": "test3",
"slug": "test3",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
]
}
# Test that when addon is added, next update will reload the entry so we register
# a new device
with patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
):
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=3))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 3
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import autograph
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
seed: The graph-level random seed.
"""
def __init__(self, name, read_only_collections=True):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
read_only_collections: whether to not write function graph collections
back to default graph. Defaults to True.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.structured_outputs = None
self._read_only_collections = read_only_collections
self._weak_variables = []
self.outer_graph = ops.get_default_graph()
self.captures = collections.OrderedDict()
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on inheriting
# this stack from the default graph even in eager mode. Maybe it should be
# part of the eager context? This would also allow us to remove a
# get_default_graph() call from the function cache lookup.
self._distribution_strategy_stack = graph._distribution_strategy_stack
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
if context.executing_eagerly():
self.seed = context.global_seed()
self._xla_compile = (context.context().device_spec.device_type == "TPU")
if self._distribution_strategy_stack or self._xla_compile:
self._add_device_to_stack(context.context().device_name)
else:
self.seed = graph.seed
self._xla_compile = getattr(graph, "_xla_compile", False)
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy()
if (self._distribution_strategy_stack
or self._xla_compile
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
if not self._read_only_collections:
self._collections = graph._collections
else:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
self._graph_key = graph._graph_key
# pylint: enable=protected-access
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def create_op(
self,
op_type,
inputs,
dtypes,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self.captures.values())
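# A minimal usage sketch (illustrative; not part of the original module). From
# the user's side, capture happens implicitly when a traced function closes
# over an external tensor. Assuming TF 1.x with eager execution enabled:
#
#   import tensorflow as tf
#   tf.enable_eager_execution()
#   v = tf.constant(3.0)            # external (eager) tensor
#
#   @tf.contrib.eager.defun
#   def triple(x):
#     return x * v                  # `v` is captured into the FuncGraph
#
#   triple(tf.constant(2.0))        # -> 6.0; `v` appears among the
#                                   # function graph's external_captures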
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
experimental_autograph=False,
add_control_dependencies=True,
arg_names=None,
op_return_value=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
    func_graph: Optional. An instance of FuncGraph. If provided, this graph is
      used; otherwise a new one is built and returned.
experimental_autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies
else:
control_manager = ops.NullContextmanager
with func_graph.as_default(), control_manager() as a:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None:
args = signature
kwargs = {}
func_args = _get_defun_inputs_from_args(args, arg_names)
func_kwargs = _get_defun_inputs_from_kwargs(kwargs)
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, nest.flatten(func_kwargs))
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
else:
try:
x = ops.convert_to_tensor_or_indexed_slices(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
this_tape = tape.push_new_tape()
try:
if experimental_autograph:
func_outputs = autograph.converted_call(
python_func, None,
autograph.ConversionOptions(
verbose=True,
recursive=True,
strip_decorators=(function.defun,),
optional_features=(),
), *func_args, **func_kwargs)
else:
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors and `None`s.
func_outputs = nest.map_structure(convert, func_outputs)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
tape.pop_tape(this_tape)
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
tape_variables = this_tape.watched_variables()
arg_variables = set()
inputs = []
for arg in nest.flatten(func_args) + nest.flatten(func_kwargs):
if isinstance(arg, resource_variable_ops.ResourceVariable):
try:
resource_placeholder = func_graph.captures.pop(arg.handle)
arg_variables.add(arg)
except KeyError:
# This case occurs if a Variable among the inputs is not actually
# used by the function; we still add an explicit input for it
# because the user should presumably pass the Variable as an input
# to the corresponding graph function.
resource_placeholder = _create_substitute_placeholder(arg.handle)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in tape_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
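# Illustrative sketch (hedged; exercises this module's internals rather than a
# public API): tracing a plain Python function into a FuncGraph.
#
#   def f(x):
#     return x * x
#
#   graph = func_graph_from_py_func(
#       "f", f, args=[ops.convert_to_tensor(2.0)], kwargs={})
#   graph.inputs   # placeholders created for the positional args
#   graph.outputs  # tensors produced by the traced body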
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):
if arg1 is not arg2:
raise ValueError(errmsg)
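# Example (illustrative) of what check_mutation guards against: a traced
# function that alters the structure of its inputs, e.g.
#
#   def bad(lst):
#     lst.append(1.0)  # changes the structure of an input argument
#     return lst
#
# tracing `bad` raises the ValueError above, since the flattened argument
# structure before and after the call no longer matches.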
def flatten(sequence):
"""A wrapper around `nest.flatten` that also unpacks `IndexedSlices`."""
# TODO(akshayka): Support `SparseTensor` in a similar fashion.
flat_sequence = nest.flatten(sequence)
outputs = []
for item in flat_sequence:
if isinstance(item, ops.IndexedSlices):
if item.dense_shape is not None:
outputs.extend([item.values, item.indices, item.dense_shape])
else:
outputs.extend([item.values, item.indices])
else:
outputs.append(item)
return outputs
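# Worked example (illustrative; v, i, s, t stand for arbitrary tensors): an
# IndexedSlices with a known dense_shape contributes three tensors to the
# flattened output,
#
#   slices = ops.IndexedSlices(values=v, indices=i, dense_shape=s)
#   flatten([slices, t])  # -> [v, i, s, t]
#
# whereas plain `nest.flatten` would keep `slices` as a single element.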
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(args, names, structure=args)
def _get_defun_inputs(flat_args, names, structure):
"""Maps python function args to graph-construction inputs.
Args:
flat_args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`flat_args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
Returns:
Placeholders with the same structure as `structure`.
"""
function_inputs = []
if names is None:
names = [None] * len(flat_args)
for arg_value, name in zip(flat_args, names):
for arg in nest.flatten(arg_value):
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder = graph_placeholder(
arg.dtype, arg.shape,
name=requested_name)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
else:
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs)
def _get_defun_inputs_from_kwargs(kwargs):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, flat_args = zip(*sorted(kwargs.items()))
else:
names = []
flat_args = []
return _get_defun_inputs(flat_args, names, structure=kwargs)
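# Illustrative note: because kwargs are sorted by key above, tracing with
# kwargs={'b': tb, 'a': ta} creates placeholders in the order a, b, and
# nest.pack_sequence_as restores the dict structure, e.g.
#
#   _get_defun_inputs_from_kwargs({'b': tb, 'a': ta})
#   # -> {'a': <placeholder for ta>, 'b': <placeholder for tb>}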
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import threading
import time
import paddle
from paddle.static import sparsity
import numpy as np
class TestASPUtils(unittest.TestCase):
def test_get_check_method(self):
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D)
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D)
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D)
def test_density(self):
x = np.array([[1.0, 1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertEqual(sparsity.calculate_density(x), 0.56)
x[:, 0] = 0.0
self.assertEqual(sparsity.calculate_density(x), 0.4)
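    # Worked arithmetic for the assertions above (illustrative): x is a 5x5
    # matrix with 14 non-zeros, so density = 14 / 25 = 0.56; zeroing column 0
    # clears 4 more entries, leaving 10 / 25 = 0.4.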
def test_check_mask_1d(self):
x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 4))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 5))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 5))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 6))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 4, 6))
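    # Reading of the assertions above (illustrative): check_mask_1d(x, n, m)
    # holds when every group of m consecutive elements along each row contains
    # at least n zeros. E.g. the first row [1, 0, 0, 1, 1] has 2 zeros in its
    # first group of 4, so (n=2, m=4) passes while (n=3, m=4) fails.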
def test_get_mask_1d(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
def test_check_mask_2d(self):
x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 4))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 5))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 5))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 6))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 4, 6))
def test_get_mask_2d_greedy(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
def test_get_mask_2d_best(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
def test_threadsafe_valid_2d_patterns(self):
def get_reference(m=4, n=2):
from itertools import permutations
patterns = np.zeros(m)
patterns[:n] = 1
patterns = list(set(permutations(patterns.tolist())))
patterns = patterns + patterns
patterns = np.asarray(list(set(permutations(patterns, m))))
valid = ((patterns.sum(axis=1) <= n).sum(axis=1) == m
).nonzero()[0].reshape(-1)
valid_patterns = np.empty((valid.shape[0], m, m))
valid_patterns[:] = patterns[valid[:]]
return valid_patterns
for _ in range(4):
computing_thread = threading.Thread(
target=paddle.fluid.contrib.sparsity.utils.
_compute_valid_2d_patterns,
args=(2, 4))
computing_thread.start()
time.sleep(3)
patterns_map = paddle.fluid.contrib.sparsity.utils._valid_2d_patterns
reference_patterns = get_reference()
reference_key = '4_2'
self.assertTrue(reference_key in patterns_map)
self.assertTrue(len(patterns_map) == 1)
self.assertTrue((reference_patterns == patterns_map[reference_key]).all(
))
def test_check_sparsity(self):
for _ in range(10):
x = np.random.randint(10, size=(5))
x_2d = x.reshape(1, x.shape[0])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5))
x_2d = x
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5, 5))
x_2d = x.reshape(x.shape[0] * x.shape[1], x.shape[2])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5, 5, 5))
x_2d = x.reshape(x.shape[0], x.shape[1] * x.shape[2] * x.shape[3])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
def test_create_mask(self):
for _ in range(10):
x = np.random.randint(10, size=(5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5, 5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
def __test_1D_2D_sparsity_checking_methods(self, x_2d):
mask = paddle.fluid.contrib.sparsity.get_mask_1d(x_2d, 2, 4)
self.assertEqual(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D,
n=2,
m=4),
paddle.fluid.contrib.sparsity.check_mask_1d(mask, 2, 4))
mask = paddle.fluid.contrib.sparsity.get_mask_2d_best(x_2d, 2, 4)
self.assertEqual(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4),
paddle.fluid.contrib.sparsity.check_mask_2d(mask, 2, 4))
def __test_1D_2D_sparse_mask_generation_methods(self, x):
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D,
n=2,
m=4))
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4))
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4))
|
|
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason@jorendorff.com> (and others - see the url!)
Date: 23 Feb 2003
"""
# TODO
# - Is __iter__ worth the trouble? It breaks the sequence
# protocol and breaks compatibility with str/unicode.
# - Perhaps support arguments to touch().
# - Note: __add__() technically has a bug, I think, where
# it doesn't play nice with other types that implement
# __radd__(). Test this.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
from __future__ import generators
import sys, os, fnmatch, glob, shutil, codecs
__version__ = '1.2'
__all__ = ['path']
# Pre-2.3 support. Are unicode filenames supported?
_base = str
try:
if os.path.supports_unicode_filenames:
_base = unicode
except AttributeError:
pass
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
def __iter__(self):
return iter(self.listdir())
# Adding a path and a string yields a path.
def __add__(self, more):
return path(_base(self) + more)
def __radd__(self, other):
return path(other + _base(self))
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return path(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd():
""" Return the current working directory as a path object. """
return path(os.getcwd())
getcwd = staticmethod(getcwd)
# --- Operations on path strings.
def abspath(self): return path(os.path.abspath(self))
def normcase(self): return path(os.path.normcase(self))
def normpath(self): return path(os.path.normpath(self))
def realpath(self): return path(os.path.realpath(self))
def expanduser(self): return path(os.path.expanduser(self))
def expandvars(self): return path(os.path.expandvars(self))
def dirname(self): return path(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return path(drive)
parent = property(dirname)
name = property(basename)
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers. """)
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return path(parent), child
def splitdrive(self):
drive, rel = os.path.splitdrive(self)
return path(drive), rel
def splitext(self):
# Cast to plain string using _base because Python 2.2
# implementations of os.path.splitext use "for c in path:..."
# which means something different when applied to a path
# object.
filename, ext = os.path.splitext(_base(self))
return path(filename), ext
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return path(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return path(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return path(os.path.join(self, *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
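    # Example (illustrative):
    #   path('/home/guido/bin').splitall()
    #   -> [path('/'), 'home', 'guido', 'bin']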
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = path(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = path(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return path(os.curdir)
else:
return path(os.path.join(*segments))
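    # Worked example (illustrative, POSIX paths):
    #   path('/home/guido/bin').relpathto('/home/alex/lib')
    #   -> path('../../alex/lib')
    # two os.pardir segments climb past 'bin' and 'guido' to the shared
    # prefix '/home', then the diverging part 'alex/lib' is appended.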
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
"""
for child in self:
if pattern is None or child.fnmatch(pattern):
yield child
if child.isdir():
for item in child.walk(pattern):
yield item
def walkdirs(self, pattern=None):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
"""
for child in self:
if child.isdir():
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern):
yield subsubdir
def walkfiles(self, pattern=None):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
for child in self:
if child.isfile():
if pattern is None or child.fnmatch(pattern):
yield child
elif child.isdir():
for f in child.walkfiles(pattern):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
return map(path, glob.glob(_base(self / pattern)))
# --- Reading an entire file at once.
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = file(self, 'rb')
try:
return f.read()
finally:
f.close()
def text(self, encoding=None, errors='strict'):
""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = file(self, _textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return t.replace(u'\r\n', u'\n').replace(u'\r', u'\n')
def lines(self, encoding=None, errors='strict', retain=True):
""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = file(self, _textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
# --- Methods for querying the filesystem.
exists = os.path.exists
isabs = os.path.isabs
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return path(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
|
|
# Copyright 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests to Nova.
"""
from keystoneauth1 import loading as ks_loading
from novaclient import client as nova_client
from novaclient import exceptions as nova_exception
from novaclient import utils
from oslo_config import cfg
import six
from manila.common import client_auth
from manila.common.config import core_opts
from manila.db import base
from manila import exception
from manila.i18n import _
NOVA_GROUP = 'nova'
AUTH_OBJ = None
nova_deprecated_opts = [
cfg.StrOpt('nova_admin_username',
default='nova',
help='Nova admin username.',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer. Please "
"use [nova] username instead."),
cfg.StrOpt('nova_admin_password',
help='Nova admin password.',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer. Please "
"use [nova] password instead."),
cfg.StrOpt('nova_admin_tenant_name',
default='service',
help='Nova admin tenant name.',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer. Please "
"use [nova] tenant instead."),
cfg.StrOpt('nova_admin_auth_url',
default='http://localhost:5000/v2.0',
help='Identity service URL.',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer. Please "
"use [nova] url instead."),
cfg.StrOpt('nova_catalog_info',
default='compute:nova:publicURL',
help='Info to match when looking for nova in the service '
'catalog. Format is separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer."),
cfg.StrOpt('nova_catalog_admin_info',
default='compute:nova:adminURL',
help='Same as nova_catalog_info, but for admin endpoint.',
deprecated_group='DEFAULT',
deprecated_for_removal=True,
deprecated_reason="This option isn't used any longer."),
]
nova_opts = [
cfg.StrOpt('api_microversion',
default='2.10',
deprecated_group="DEFAULT",
deprecated_name="nova_api_microversion",
help='Version of Nova API to be used.'),
cfg.StrOpt('ca_certificates_file',
deprecated_group="DEFAULT",
deprecated_name="nova_ca_certificates_file",
help='Location of CA certificates file to use for nova client '
'requests.'),
cfg.BoolOpt('api_insecure',
default=False,
deprecated_group="DEFAULT",
deprecated_name="nova_api_insecure",
help='Allow to perform insecure SSL requests to nova.'),
cfg.StrOpt('endpoint_type',
default='publicURL',
help='Endpoint type to be used with nova client calls.'),
cfg.StrOpt('region_name',
help='Region name for connecting to nova.'),
]
CONF = cfg.CONF
CONF.register_opts(nova_deprecated_opts)
CONF.register_opts(core_opts)
CONF.register_opts(nova_opts, NOVA_GROUP)
ks_loading.register_session_conf_options(CONF, NOVA_GROUP)
ks_loading.register_auth_conf_options(CONF, NOVA_GROUP)
def list_opts():
return client_auth.AuthClientLoader.list_opts(NOVA_GROUP)
def novaclient(context):
global AUTH_OBJ
if not AUTH_OBJ:
deprecated_opts_for_v2 = {
'username': CONF.nova_admin_username,
'password': CONF.nova_admin_password,
'tenant_name': CONF.nova_admin_tenant_name,
'auth_url': CONF.nova_admin_auth_url,
}
AUTH_OBJ = client_auth.AuthClientLoader(
client_class=nova_client.Client,
exception_module=nova_exception,
cfg_group=NOVA_GROUP,
deprecated_opts_for_v2=deprecated_opts_for_v2)
return AUTH_OBJ.get_client(context,
version=CONF[NOVA_GROUP].api_microversion,
insecure=CONF[NOVA_GROUP].api_insecure,
cacert=CONF[NOVA_GROUP].ca_certificates_file,
endpoint_type=CONF[NOVA_GROUP].endpoint_type,
region_name=CONF[NOVA_GROUP].region_name)
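# Illustrative sketch: AUTH_OBJ is built lazily on the first call and cached
# for the lifetime of the process, so repeated calls are cheap:
#
#   client = novaclient(context)  # first call constructs the AuthClientLoader
#   client.servers.list()
#   novaclient(context)           # later calls reuse the cached AUTH_OBJ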
def _untranslate_server_summary_view(server):
"""Maps keys for servers summary view."""
d = {}
d['id'] = server.id
d['status'] = server.status
d['flavor'] = server.flavor['id']
d['name'] = server.name
d['image'] = server.image['id']
d['created'] = server.created
d['addresses'] = server.addresses
d['networks'] = server.networks
d['tenant_id'] = server.tenant_id
d['user_id'] = server.user_id
d['security_groups'] = getattr(server, 'security_groups', [])
return d
def _to_dict(obj):
if isinstance(obj, dict):
return obj
elif hasattr(obj, 'to_dict'):
return obj.to_dict()
else:
return obj.__dict__
def translate_server_exception(method):
"""Transforms the exception for the instance.
Note: keeps its traceback intact.
"""
@six.wraps(method)
def wrapper(self, ctx, instance_id, *args, **kwargs):
try:
res = method(self, ctx, instance_id, *args, **kwargs)
return res
except nova_exception.ClientException as e:
if isinstance(e, nova_exception.NotFound):
raise exception.InstanceNotFound(instance_id=instance_id)
elif isinstance(e, nova_exception.BadRequest):
raise exception.InvalidInput(reason=six.text_type(e))
else:
raise exception.ManilaException(e)
return wrapper
class API(base.Base):
"""API for interacting with novaclient."""
def server_create(self, context, name, image, flavor, key_name=None,
user_data=None, security_groups=None,
block_device_mapping=None,
block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1,
admin_pass=None, meta=None):
return _untranslate_server_summary_view(
novaclient(context).servers.create(
name, image, flavor, userdata=user_data,
security_groups=security_groups, key_name=key_name,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
min_count=instance_count, admin_pass=admin_pass,
meta=meta)
)
def server_delete(self, context, instance):
novaclient(context).servers.delete(instance)
@translate_server_exception
def server_get(self, context, instance_id):
return _untranslate_server_summary_view(
novaclient(context).servers.get(instance_id)
)
def server_get_by_name_or_id(self, context, instance_name_or_id):
try:
server = utils.find_resource(
novaclient(context).servers, instance_name_or_id)
except nova_exception.CommandError:
            # We did not find the server in the current tenant,
            # so proceed to search across all tenants.
try:
server = utils.find_resource(
novaclient(context).servers, instance_name_or_id,
all_tenants=True)
except nova_exception.CommandError as e:
msg = _("Failed to get Nova VM. %s") % e
raise exception.ManilaException(msg)
return _untranslate_server_summary_view(server)
@translate_server_exception
def server_pause(self, context, instance_id):
novaclient(context).servers.pause(instance_id)
@translate_server_exception
def server_unpause(self, context, instance_id):
novaclient(context).servers.unpause(instance_id)
@translate_server_exception
def server_suspend(self, context, instance_id):
novaclient(context).servers.suspend(instance_id)
@translate_server_exception
def server_resume(self, context, instance_id):
novaclient(context).servers.resume(instance_id)
@translate_server_exception
def server_reboot(self, context, instance_id, soft_reboot=False):
hardness = 'SOFT' if soft_reboot else 'HARD'
novaclient(context).servers.reboot(instance_id, hardness)
@translate_server_exception
def server_rebuild(self, context, instance_id, image_id, password=None):
return _untranslate_server_summary_view(
novaclient(context).servers.rebuild(instance_id, image_id,
password)
)
@translate_server_exception
def instance_volume_attach(self, context, instance_id, volume_id,
device=None):
if device == 'auto':
device = None
return novaclient(context).volumes.create_server_volume(instance_id,
volume_id,
device)
@translate_server_exception
def instance_volume_detach(self, context, instance_id, att_id):
return novaclient(context).volumes.delete_server_volume(instance_id,
att_id)
@translate_server_exception
def instance_volumes_list(self, context, instance_id):
from manila.volume import cinder
volumes = novaclient(context).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinder.cinderclient(context).volumes.get(volume.id)
volume.name = volume_data.name
return volumes
@translate_server_exception
def server_update(self, context, instance_id, name):
return _untranslate_server_summary_view(
novaclient(context).servers.update(instance_id, name=name)
)
def update_server_volume(self, context, instance_id, volume_id,
new_volume_id):
novaclient(context).volumes.update_server_volume(instance_id,
volume_id,
new_volume_id)
def keypair_create(self, context, name):
return novaclient(context).keypairs.create(name)
def keypair_import(self, context, name, public_key):
return novaclient(context).keypairs.create(name, public_key)
def keypair_delete(self, context, keypair_id):
novaclient(context).keypairs.delete(keypair_id)
def keypair_list(self, context):
return novaclient(context).keypairs.list()
def image_list(self, context):
client = novaclient(context)
if hasattr(client, 'images'):
# Old novaclient with 'images' API proxy
return client.images.list()
# New novaclient without 'images' API proxy
return client.glance.list()
def add_security_group_to_server(self, context, server, security_group):
return novaclient(context).servers.add_security_group(server,
security_group)
|
|
from __future__ import print_function, absolute_import
from builtins import str
import os
import copy
import shutil
from os.path import relpath, join, exists, dirname, basename
from os import makedirs, remove
from json import load
from tools.export.exporters import Exporter, apply_supported_whitelist
from tools.targets import TARGET_MAP
from tools.utils import NotSupportedException
from tools.build_api import prepare_toolchain
POST_BINARY_WHITELIST = set([
"TEENSY3_1Code.binary_hook",
"MCU_NRF51Code.binary_hook",
"LPCTargetCode.lpc_patch",
"LPC4088Code.binary_hook"
])
class GNUARMNetbeans(Exporter):
NAME = 'GNU ARM Netbeans'
TOOLCHAIN = 'GCC_ARM'
@classmethod
def is_target_supported(cls, target_name):
target = TARGET_MAP[target_name]
return apply_supported_whitelist(
cls.TOOLCHAIN, POST_BINARY_WHITELIST, target)
@staticmethod
def prepare_sys_lib(libname):
return "-l" + libname
@staticmethod
def get_defines_and_remove_from_flags(flags_in, str_key):
defines = []
flags_temp = copy.deepcopy(flags_in)
for f in flags_temp[str_key]:
f = f.strip()
if f.startswith('-D'):
defines.append(f[2:])
flags_in[str_key].remove(f)
return defines
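    # Worked example (illustrative): with
    #   flags_in = {'common_flags': ['-DNDEBUG', '-Og']}
    # get_defines_and_remove_from_flags(flags_in, 'common_flags') returns
    # ['NDEBUG'] and leaves flags_in['common_flags'] == ['-Og'].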
@staticmethod
def get_includes_and_remove_from_flags(flags_in, str_key):
includes = []
flags_temp = copy.deepcopy(flags_in)
next_is_include = False
for f in flags_temp[str_key]:
f = f.strip()
if next_is_include:
includes.append(f)
flags_in[str_key].remove(f)
next_is_include = False
continue
if f == "-include":
flags_in[str_key].remove(f)
next_is_include = True
return includes
@staticmethod
def get_c_std_and_remove_from_flag(flags_in, str_key):
comp_std = ''
c_std = {
'c90': 'c90', 'c89': 'c90', 'gnu90': 'gnu90', 'gnu89': 'gnu90',
            'c99': 'c99', 'c9x': 'c99', 'gnu99': 'gnu99', 'gnu9x': 'gnu99',
'c11': 'c11', 'c1x': 'c11', 'gnu11': 'gnu11', 'gnu1x': 'gnu11'
}
cpp_std = {
'c++98': 'cpp98', 'c++03': 'cpp98',
'gnu++98': 'gnucpp98', 'gnu++03': 'gnucpp98',
'c++0x': 'cpp0x', 'gnu++0x': 'gnucpp0x',
'c++11': 'cpp11', 'gnu++11': 'gnucpp11',
'c++1y': 'cpp1y', 'gnu++1y': 'gnucpp1y',
'c++14': 'cpp14', 'gnu++14': 'gnucpp14',
'c++1z': 'cpp1z', 'gnu++1z': 'gnucpp1z',
}
flags_temp = copy.deepcopy(flags_in)
for f in flags_temp[str_key]:
f = f.strip()
if f.startswith('-std='):
comp_std = f[len('-std='):]
flags_in[str_key].remove(f)
elif f.startswith('-'):
std = f[len('-'):]
if std in c_std or std in cpp_std:
comp_std = std
flags_in[str_key].remove(f)
return comp_std
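    # Worked example (illustrative): given
    #   flags_in = {'c_flags': ['-std=gnu99', '-Os']}
    # get_c_std_and_remove_from_flag(flags_in, 'c_flags') returns 'gnu99'
    # and leaves flags_in['c_flags'] == ['-Os'].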
def validate_resources(self):
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
def create_jinja_ctx(self):
self.options = {}
flags = {}
self.validate_resources()
# Convert all Backslashes to Forward Slashes
self.resources.win_to_unix()
self.ld_script = self.filter_dot(
self.resources.linker_script)
# Read in all profiles, we'll extract compiler options.
profiles = self.get_all_profiles()
profile_ids = [s.lower() for s in profiles]
profile_ids.sort()
for prof_id in profile_ids:
            # There are several categories of options: one common to all
            # tools and a specific category for each of the tools.
opts = {}
opts['defines'] = {}
opts['common'] = {}
opts['as'] = {}
opts['c'] = {}
opts['cpp'] = {}
opts['ld'] = {}
opts['id'] = prof_id
opts['name'] = opts['id'].capitalize()
profile = profiles[prof_id]
# A small hack, do not bother with src_path again,
# pass an empty string to avoid crashing.
src_paths = ['']
target_name = self.toolchain.target.name
toolchain = prepare_toolchain(
src_paths, "", target_name, self.TOOLCHAIN, build_profile=[profile])
flags = self.toolchain_flags(toolchain)
opts['defines'] = self.get_defines_and_remove_from_flags(flags, 'common_flags')
opts['forced_includes'] = self.get_includes_and_remove_from_flags(flags, 'common_flags')
opts['common'] = flags['common_flags']
opts['as'] = flags['asm_flags']
opts['c'] = flags['c_flags']
opts['cpp'] = flags['cxx_flags']
opts['ld'] = flags['ld_flags']
self.options[prof_id] = opts
sources = [] # list of strings
forced_includes = self.get_includes_and_remove_from_flags(flags, 'c_flags')
forced_includes += self.get_includes_and_remove_from_flags(flags, 'cxx_flags')
# Remove Duplicates
forced_includes = list(set(forced_includes))
c_std = self.get_c_std_and_remove_from_flag(flags, 'c_flags')
cpp_std = self.get_c_std_and_remove_from_flag(flags, 'cxx_flags')
# Make one list of all resources
for r_type in ['c_sources', 's_sources', 'cpp_sources']:
sources.extend(getattr(self.resources, r_type))
# Remove all leading './'
c_sources = [self.filter_dot(field) for field in self.resources.c_sources]
cpp_sources = [self.filter_dot(field) for field in self.resources.cpp_sources]
s_sources = [self.filter_dot(field) for field in self.resources.s_sources]
headers = [self.filter_dot(field) for field in self.resources.headers]
sources = [self.filter_dot(field) for field in sources]
include_paths = [self.filter_dot(field) for field in self.resources.inc_dirs]
sys_libs = [self.prepare_sys_lib(lib) for lib
in self.toolchain.sys_libs]
preproc = " ".join([basename(self.toolchain.preproc[0])] +
self.toolchain.preproc[1:] +
self.toolchain.ld[1:])
if 'nbproject' in include_paths:
include_paths.remove('nbproject')
jinja_ctx = {
'name': self.project_name,
'target': self.toolchain.target.name,
'elf_location': join('BUILD', self.project_name) + '.elf',
'c_symbols': self.toolchain.get_symbols(),
'asm_symbols': self.toolchain.get_symbols(True),
'c_flags': flags['c_flags'],
'cxx_flags': flags['cxx_flags'],
'ld_flags': self.flags['ld_flags'],
'asm_flags': self.flags['asm_flags'],
'common_flags': self.flags['common_flags'],
'include_paths': include_paths,
'forced_includes': forced_includes,
'c_sources': c_sources,
'cpp_sources': cpp_sources,
's_sources': s_sources,
'headers': headers,
'headers_folder': self.get_netbeans_file_list(sorted(headers)),
'sources_folder': self.get_netbeans_file_list(sorted(sources)),
'options': self.options,
'c_std': self.get_netbeans_c_std(c_std),
'cpp_std': self.get_netbeans_cpp_std(cpp_std),
'linker_script': self.ld_script,
'linker_libs': sys_libs,
'pp_cmd': preproc,
'cc_cmd': self.toolchain.cc[0],
'cppc_cmd': self.toolchain.cppc[0],
'asm_cmd': self.toolchain.asm[0],
'ld_cmd': self.toolchain.ld[0],
'elf2bin_cmd': self.toolchain.elf2bin
}
return jinja_ctx
def generate(self):
"""Generate Makefile, configurations.xml & project.xml Netbeans project file
"""
jinja_ctx = self.create_jinja_ctx()
if not exists(join(self.export_dir, 'nbproject')):
makedirs(join(self.export_dir, 'nbproject'))
self.gen_file('nb/configurations.tmpl', jinja_ctx, 'nbproject/configurations.xml')
self.gen_file('nb/project.tmpl', jinja_ctx, 'nbproject/project.xml')
self.gen_file_nonoverwrite('nb/mbedignore.tmpl', jinja_ctx,
'.mbedignore')
self.gen_file('nb/Makefile.tmpl', jinja_ctx, 'Makefile')
print('Done. Import the \'{0}\' project in Netbeans.'.format(self.project_name))
@staticmethod
def clean(_):
shutil.rmtree("nbproject")
remove("Makefile")
# -------------------------------------------------------------------------
@staticmethod
def filter_dot(str_in):
"""
Remove the './' prefix, if present.
This function assumes that resources.win_to_unix()
replaced all windows backslashes with slashes.
"""
if str_in is None:
return None
if str_in[:2] == './':
return str_in[2:]
return str_in
# -------------------------------------------------------------------------
@staticmethod
def get_all_profiles():
tools_path = dirname(dirname(dirname(__file__)))
file_names = [join(tools_path, "profiles", fn) for fn in os.listdir(
join(tools_path, "profiles")) if fn.endswith(".json")]
profiles = {}
for fn in file_names:
content = load(open(fn))
profile_name = basename(fn).replace(".json", "")
profiles[profile_name] = content
return profiles
@staticmethod
def get_netbeans_file_list(file_list):
cur_dir = ''
prev_dir = ''
output = []
folder_count = 1
dir_depth = 0
for item in file_list:
cur_dir = os.path.dirname(item)
dir_temp = os.path.normpath(cur_dir)
prev_dir_temp = os.path.normpath(prev_dir)
dir_list = dir_temp.split(os.sep)
prev_dir_list = prev_dir_temp.split(os.sep)
dir_depth = len(dir_list)
# Current File is in Directory: Compare the given dir with previous Dir
if cur_dir and prev_dir != cur_dir:
# evaluate all matched items (from current and previous list)
matched = []
# Compare the Element in Previous Dir with the Elements in Current Dir
# and add the equal Elements to the match-List
for elem_prev_dir, elem_cur_dir in zip(prev_dir_list, dir_list):
if elem_prev_dir == elem_cur_dir:
matched.append(elem_cur_dir)
# calculate difference between matched and length
diff = dir_depth - len(matched)
# if previous dir was not root
if prev_dir != '':
# if the elements count is not equal we calculate the difference
if len(dir_list) != len(prev_dir_list):
dir_depth_prev = len(prev_dir_list)
delta = dir_depth_prev - len(matched)
for i in range(dir_depth_prev - delta, dir_depth_prev):
output.append('</logicalFolder>')
# if the elements count is equal, we subtract the matched length from the total length
else:
for i in range(len(matched), len(dir_list)):
output.append('</logicalFolder>')
for i in range(dir_depth - diff, dir_depth):
output.append('<logicalFolder name="f' + str(folder_count) + '" displayName="' + str(
dir_list[i]) + '" projectFiles="true">')
folder_count += 1
# Current File is in root
else:
# Close Tag if we are in root and the previous dir wasn't
if cur_dir == '' and prev_dir != '':
for i in range(0, len(prev_dir_list)):
output.append('</logicalFolder>')
# Save the Current Dir
prev_dir = cur_dir
output.append('<itemPath>' + str(item) + '</itemPath>')
if cur_dir != '':
# close all open tags
output.append('</logicalFolder>' * dir_depth)
return output
@staticmethod
def get_netbeans_c_std(c_std):
c_std_netbeans = 0
if '89' in c_std:
c_std_netbeans = 2
elif '99' in c_std:
c_std_netbeans = 3
elif '11' in c_std:
c_std_netbeans = 10
return c_std_netbeans
@staticmethod
def get_netbeans_cpp_std(cpp_std):
cpp_std_netbeans = 0
if '98' in cpp_std:
cpp_std_netbeans = 4
elif '11' in cpp_std:
cpp_std_netbeans = 8
elif '14' in cpp_std:
cpp_std_netbeans = 11
return cpp_std_netbeans
|
|
# -*- coding: utf-8 -*-
"""
Class for reading data from Alpha Omega .map files.
This class is an experimental reader with important limitations.
See the source code for details of the limitations.
The code of this reader is of alpha quality and received very limited testing.
This code is written from the incomplete file specifications available in:
[1] AlphaMap Data Acquisition System User's Manual Version 10.1.1
Section 5 APPENDIX B: ALPHAMAP FILE STRUCTURE, pages 120-140
Edited by ALPHA OMEGA Home Office: P.O. Box 810, Nazareth Illit 17105, Israel
http://www.alphaomega-eng.com/
and from the source code of a C software for conversion of .map files to
.eeg elan software files :
[2] alphamap2eeg 1.0, 12/03/03, Anne CHEYLUS - CNRS ISC UMR 5015
Supported : Read
@author : sgarcia, Florent Jaillet
"""
# NOTE: For some specific types of comments, the following convention is used:
# "TODO:" Desirable future evolution
# "WARNING:" Information about code that is based on broken or missing
# specifications and that might be wrong
# Main limitations of this reader:
# - The reader is only able to load data stored in data blocks of type 5
# (data block for one channel). In particular it means that it doesn't
# support signals stored in blocks of type 7 (data block for multiple
# channels).
# For more details on these data blocks types, see 5.4.1 and 5.4.2 p 127 in
# [1].
# - Rather than supporting all the neo object types that could be extracted
# from the file, all read data are returned in AnalogSignal objects, even for
# digital channels or channels containing spiking information.
# - Digital channels are not converted to events or event arrays as they
# should be.
# - Loading multichannel signals as AnalogSignalArrays is not supported.
# - Many data or metadata that are available in the file and that could be
# represented in some way in the neo model are not extracted. In particular
# scaling of the data and extraction of the units of the signals are not
# supported.
# - It received very limited testing, exclusively using Python 2.6.6. In
# particular it has not been tested using Python 3.x.
#
# These limitations are mainly due to the following reasons:
# - Incomplete, unclear and in some places inaccurate specifications of the
# format in [1].
# - Lack of test files containing all the types of data blocks of interest
# (in particular no file with type 7 data blocks for multiple channels was
# available when writing this code).
# - Lack of knowledge of the Alphamap software and the associated data models.
# - Lack of time (especially as the specifications are incomplete, a lot of
# reverse engineering and testing is required, which makes the development of
# this IO very painful and long).
# needed for python 3 compatibility
from __future__ import absolute_import, division
# specific imports
import datetime
import os
import struct
# file no longer exists in Python3
try:
file
except NameError:
import io
file = io.BufferedReader
# note neo.core need only numpy and quantities
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Block, Segment, AnalogSignal
from neo.io.tools import populate_RecordingChannel
class AlphaOmegaIO(BaseIO):
"""
Class for reading data from Alpha Omega .map files (experimental)
This class is an experimental reader with important limitations.
See the source code for details of the limitations.
The code of this reader is of alpha quality and received very limited
testing.
Usage:
>>> from neo import io
>>> r = io.AlphaOmegaIO( filename = 'File_AlphaOmega_1.map')
>>> blck = r.read_block(lazy = False, cascade = True)
>>> print(blck.segments[0].analogsignals)
"""
is_readable = True # This is a reading only class
is_writable = False # writing is not supported
# This class is able to directly or indirectly read the following kinds of
# objects
supported_objects = [ Block, Segment , AnalogSignal]
# TODO: Add support for other objects that should be extractable from .map
# files (AnalogSignalArray, Event, EventArray, Epoch?, Epoch Array?,
# Spike?, SpikeTrain?)
# This class can only return a Block
readable_objects = [ Block ]
# TODO : create readers for different type of objects (Segment,
# AnalogSignal,...)
# This class is not able to write objects
writeable_objects = [ ]
# This is for GUI stuff : a definition for parameters when reading.
read_params = { Block : [ ] }
# Writing is not supported, so no GUI stuff
write_params = None
name = 'AlphaOmega'
extensions = [ 'map' ]
mode = 'file'
def __init__(self , filename = None) :
"""
Arguments:
filename : the .map Alpha Omega file name
"""
BaseIO.__init__(self)
self.filename = filename
# write is not supported so I do not overload write method from BaseIO
def read_block(self,
# the 2 first keyword arguments are imposed by neo.io API
lazy = False,
cascade = True):
"""
Return a Block.
"""
def count_samples(m_length):
"""
Count the number of signal samples available in a type 5 data block
of length m_length
"""
# for information about type 5 data block, see [1]
count = int((m_length-6)/2-2)
# -6 corresponds to the header of block 5, and the -2 take into
# account the fact that last 2 values are not available as the 4
# corresponding bytes are coding the time stamp of the beginning
# of the block
return count
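# Worked example (illustrative): a type 5 block with m_length = 26 would
# yield count = (26 - 6) / 2 - 2 = 8 samples, i.e. 20 bytes of payload
# minus the 4 trailing bytes that hold the time stamp of the block.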
# create the neo Block that will be returned at the end
blck = Block(file_origin = os.path.basename(self.filename))
blck.file_origin = os.path.basename(self.filename)
fid = open(self.filename, 'rb')
# NOTE: in the following, the word "block" is used in the sense used in
# the alpha-omega specifications (ie a data chunk in the file), rather
# than in the sense of the usual Block object in neo
# step 1: read the headers of all the data blocks to load the file
# structure
pos_block = 0 # position of the current block in the file
file_blocks = [] # list of data blocks available in the file
if not cascade:
# we read only the main header
m_length, m_TypeBlock = struct.unpack('Hcx' , fid.read(4))
# m_TypeBlock should be 'h', as we read the first block
block = HeaderReader(fid,
dict_header_type.get(m_TypeBlock,
Type_Unknown)).read_f()
block.update({'m_length': m_length,
'm_TypeBlock': m_TypeBlock,
'pos': pos_block})
file_blocks.append(block)
else: # cascade == True
seg = Segment(file_origin = os.path.basename(self.filename))
seg.file_origin = os.path.basename(self.filename)
blck.segments.append(seg)
while True:
first_4_bytes = fid.read(4)
if len(first_4_bytes) < 4:
# we have reached the end of the file
break
else:
m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)
block = HeaderReader(fid,
dict_header_type.get(m_TypeBlock,
Type_Unknown)).read_f()
block.update({'m_length': m_length,
'm_TypeBlock': m_TypeBlock,
'pos': pos_block})
if m_TypeBlock == '2':
# The beginning of the block of type '2' is identical for
# all types of channels, but the following part depends on
# the type of channel. So we need a special case here.
# WARNING: How to check the type of channel is not
# described in the documentation. So here I use what is
# proposed in the C code [2].
# According to this C code, it seems that the 'm_isAnalog'
# is used to distinguish analog and digital channels, and
# 'm_Mode' encodes the type of analog channel:
# 0 for continuous, 1 for level, 2 for external trigger.
# But in some files, I found channels that seemed to be
# continuous channels with 'm_Mode' = 128 or 192. So I
# decided to consider every channel with 'm_Mode'
# different from 1 or 2 as continuous. I also couldn't
# check that values of 1 and 2 are really for level and
# external trigger as I had no test files containing data
# of these types.
type_subblock = 'unknown_channel_type(m_Mode=' \
+ str(block['m_Mode'])+ ')'
description = Type2_SubBlockUnknownChannels
block.update({'m_Name': 'unknown_name'})
if block['m_isAnalog'] == 0:
# digital channel
type_subblock = 'digital'
description = Type2_SubBlockDigitalChannels
elif block['m_isAnalog'] == 1:
# analog channel
if block['m_Mode'] == 1:
# level channel
type_subblock = 'level'
description = Type2_SubBlockLevelChannels
elif block['m_Mode'] == 2:
# external trigger channel
type_subblock = 'external_trigger'
description = Type2_SubBlockExtTriggerChannels
else:
# continuous channel
type_subblock = 'continuous(Mode' \
+ str(block['m_Mode']) +')'
description = Type2_SubBlockContinuousChannels
subblock = HeaderReader(fid, description).read_f()
block.update(subblock)
block.update({'type_subblock': type_subblock})
file_blocks.append(block)
pos_block += m_length
fid.seek(pos_block)
# step 2: find the available channels
list_chan = [] # list containing indexes of channel blocks
for ind_block, block in enumerate(file_blocks):
if block['m_TypeBlock'] == '2':
list_chan.append(ind_block)
# step 3: find blocks containing data for the available channels
list_data = [] # list of lists of indexes of data blocks
# corresponding to each channel
for ind_chan, chan in enumerate(list_chan):
list_data.append([])
num_chan = file_blocks[chan]['m_numChannel']
for ind_block, block in enumerate(file_blocks):
if block['m_TypeBlock'] == '5':
if block['m_numChannel'] == num_chan:
list_data[ind_chan].append(ind_block)
# step 4: compute the length (number of samples) of the channels
chan_len = np.zeros(len(list_data), dtype = np.int)
for ind_chan, list_blocks in enumerate(list_data):
for ind_block in list_blocks:
chan_len[ind_chan] += count_samples(
file_blocks[ind_block]['m_length'])
# step 5: find channels for which data are available
ind_valid_chan = np.nonzero(chan_len)[0]
# step 6: load the data
# TODO give the possibility to load data as AnalogSignalArrays
for ind_chan in ind_valid_chan:
list_blocks = list_data[ind_chan]
ind = 0 # index in the data vector
# read time stamp for the beginning of the signal
form = '<l' # reading format
ind_block = list_blocks[0]
count = count_samples(file_blocks[ind_block]['m_length'])
fid.seek(file_blocks[ind_block]['pos']+6+count*2)
buf = fid.read(struct.calcsize(form))
val = struct.unpack(form , buf)
start_index = val[0]
# WARNING: in the following, blocks are read assuming that they
# are all contiguous and sorted in time. I don't know if it's
# always the case. Maybe we should use the time stamp of each
# data block to choose where to put the read data in the array.
if not lazy:
temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
# NOTE: we could directly create an empty AnalogSignal and
# load the data in it, but it is much faster to load data
# in a temporary numpy array and create the AnalogSignals
# from this temporary array
for ind_block in list_blocks:
count = count_samples(
file_blocks[ind_block]['m_length'])
fid.seek(file_blocks[ind_block]['pos']+6)
temp_array[ind:ind+count] = \
np.fromfile(fid, dtype = np.int16, count = count)
ind += count
sampling_rate = \
file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
t_start = (start_index / sampling_rate).simplified
if lazy:
ana_sig = AnalogSignal([],
sampling_rate = sampling_rate,
t_start = t_start,
name = file_blocks\
[list_chan[ind_chan]]['m_Name'],
file_origin = \
os.path.basename(self.filename),
units = pq.dimensionless)
ana_sig.lazy_shape = chan_len[ind_chan]
else:
ana_sig = AnalogSignal(temp_array,
sampling_rate = sampling_rate,
t_start = t_start,
name = file_blocks\
[list_chan[ind_chan]]['m_Name'],
file_origin = \
os.path.basename(self.filename),
units = pq.dimensionless)
ana_sig.channel_index = \
file_blocks[list_chan[ind_chan]]['m_numChannel']
ana_sig.annotate(channel_name = \
file_blocks[list_chan[ind_chan]]['m_Name'])
ana_sig.annotate(channel_type = \
file_blocks[list_chan[ind_chan]]['type_subblock'])
seg.analogsignals.append(ana_sig)
fid.close()
if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
blck.rec_datetime = datetime.datetime(\
file_blocks[0]['m_date_year'],
file_blocks[0]['m_date_month'],
file_blocks[0]['m_date_day'],
file_blocks[0]['m_time_hour'],
file_blocks[0]['m_time_minute'],
file_blocks[0]['m_time_second'],
10000 * file_blocks[0]['m_time_hsecond'])
# the 10000 is here to convert m_time_hsecond from centisecond
# to microsecond
version = file_blocks[0]['m_version']
blck.annotate(alphamap_version = version)
if cascade:
seg.rec_datetime = blck.rec_datetime.replace()
# I couldn't find a simple copy function for datetime,
# using replace without arguments is a twisted way to make a
# copy
seg.annotate(alphamap_version = version)
if cascade:
populate_RecordingChannel(blck, remove_from_annotation = True)
blck.create_many_to_one_relationship()
return blck
"""
Information for special types in [1]:
_dostime_t type definition:
struct dos_time_t
{
unsigned char hour; /* hours (0-23)*/
unsigned char minute; /* minutes (0-59)*/
unsigned char second; /* seconds (0-59) */
unsigned char hsecond; /* seconds/ 100 (0-99)*/
}
_dosdate_t type definition:
struct _dosdate_t
{
unsigned char day; /* day of month( 1-31) */
unsigned char month; /* month (1-12) */
unsigned int year; /* year (1980-2099) */
unsigned char dayofweek; /* day of week (0 = Sunday) */
}
WINDOWPLACEMENT16 type definition (according to WINE source code):
typedef struct
{
UINT16 length;
UINT16 flags;
UINT16 showCmd;
POINT16 ptMinPosition;
POINT16 ptMaxPosition;
RECT16 rcNormalPosition;
} WINDOWPLACEMENT16,*LPNONCLIENTMETRICS16;
"""
max_string_len = '32s' # maximal length of variable length strings in the file
# WARNING: I don't know what the real value is here. According to [1] p 139
# it seems that it could be 20. Some tests would be needed to check this.
# WARNING: A cleaner way to handle string reading would be preferable. Currently I
# read a buffer of max_string_len bytes and look for the C "end of string"
# character ('\x00'). It would be better either to read characters until
# reaching '\x00' or to read the exact number of characters needed, if the
# length of a string can be deduced from the length of the block and the number
# of bytes already read (it seems possible, at least for certain block types).
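# Minimal sketch of the cleaner alternative mentioned above (read single
# bytes until the C terminator '\x00' is reached); this is only an
# illustration and is not used by the reader:
#   def _read_cstring(fid):
#       chars = []
#       while True:
#           c = fid.read(1)
#           if not c or c == b'\x00':
#               break
#           chars.append(c)
#       return b''.join(chars)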
# WARNING: Some test files contain data blocks of type 'b' and they are not
# described in the documentation.
# The names of the keys in the following dicts are chosen to match as closely as
# possible the names in document [1]
TypeH_Header = [
('m_nextBlock','l'),
('m_version','h'),
('m_time_hour', 'B'),
('m_time_minute', 'B'),
('m_time_second', 'B'),
('m_time_hsecond', 'B'),
('m_date_day', 'B'),
('m_date_month', 'B'),
('m_date_year', 'H'),
('m_date_dayofweek', 'B'),
('blank', 'x'), # one blank byte because of the 2-byte alignment
('m_MinimumTime','d'),
('m_MaximumTime','d')]
Type0_SetBoards = [
('m_nextBlock','l'),
('m_BoardCount','h'),
('m_GroupCount','h'),
('m_placeMainWindow','x')] # WARNING: unknown type ('x' is wrong)
Type1_Boards = [ # WARNING: needs to be checked
('m_nextBlock','l'),
('m_Number','h'),
('m_countChannel','h'),
('m_countAnIn','h'),
('m_countAnOut','h'),
('m_countDigIn','h'),
('m_countDigOut','h'),
('m_TrigCount', 'h'), # not defined in 5.3.3 but appears in 5.5.1 and
# seems to really exist in files
# WARNING: check why 'm_TrigCount is not in the C code [2]
('m_Amplitude','f'),
('m_cSampleRate','f'), # sample rate seems to be given in kHz
('m_Duration','f'),
('m_nPreTrigmSec','f'),
('m_nPostTrigmSec','f'),
('m_TrgMode','h'),
('m_LevelValue','h'), # after this line, 5.3.3 is wrong,
# check example in 5.5.1 for the right fields
# WARNING: check why the following part is not corrected in the C code [2]
('m_nSamples','h'),
('m_fRMS','f'),
('m_ScaleFactor','f'),
('m_DapTime','f'),
('m_nameBoard', max_string_len)]
#('m_DiscMaxValue','h'), # WARNING: should this exist?
#('m_DiscMinValue','h') # WARNING: should this exist?
Type2_DefBlocksChannels = [
# common parameters for all types of channels
('m_nextBlock','l'),
('m_isAnalog','h'),
('m_isInput','h'),
('m_numChannel','h'),
('m_numColor','h'),
('m_Mode','h')]
Type2_SubBlockContinuousChannels = [
# continuous channels parameters
('blank', '2x'), # WARNING: this is not in the specs but it seems needed
('m_Amplitude','f'),
('m_SampleRate','f'),
('m_ContBlkSize','h'),
('m_ModeSpike','h'), # WARNING: the C code [2] uses unsigned short here
('m_Duration','f'),
('m_bAutoScale','h'),
('m_Name', max_string_len)]
Type2_SubBlockLevelChannels = [ # WARNING: untested
# level channels parameters
('m_Amplitude','f'),
('m_SampleRate','f'),
('m_nSpikeCount','h'),
('m_ModeSpike','h'),
('m_nPreTrigmSec','f'),
('m_nPostTrigmSec','f'),
('m_LevelValue','h'),
('m_TrgMode','h'),
('m_YesRms','h'),
('m_bAutoScale','h'),
('m_Name', max_string_len)]
Type2_SubBlockExtTriggerChannels = [ # WARNING: untested
# external trigger channels parameters
('m_Amplitude','f'),
('m_SampleRate','f'),
('m_nSpikeCount','h'),
('m_ModeSpike','h'),
('m_nPreTrigmSec','f'),
('m_nPostTrigmSec','f'),
('m_TriggerNumber','h'),
('m_Name', max_string_len)]
Type2_SubBlockDigitalChannels = [
# digital channels parameters
('m_SampleRate','f'),
('m_SaveTrigger','h'),
('m_Duration','f'),
('m_PreviousStatus','h'), # WARNING: check difference with C code here
('m_Name', max_string_len)]
Type2_SubBlockUnknownChannels = [
# WARNING: We have a mode that doesn't appear in our spec, so we don't
# know what the fields are.
# It seems that for non-digital channels the beginning is
# similar to continuous channels. Let's hope we're right...
('blank', '2x'),
('m_Amplitude','f'),
('m_SampleRate','f')]
# there are probably other fields after...
Type6_DefBlockTrigger = [ # WARNING: untested
('m_nextBlock','l'),
('m_Number','h'),
('m_countChannel','h'),
('m_StateChannels','i'),
('m_numChannel1','h'),
('m_numChannel2','h'),
('m_numChannel3','h'),
('m_numChannel4','h'),
('m_numChannel5','h'),
('m_numChannel6','h'),
('m_numChannel7','h'),
('m_numChannel8','h'),
('m_Name','c')]
Type3_DefBlockGroup = [ # WARNING: untested
('m_nextBlock','l'),
('m_Number','h'),
('m_Z_Order','h'),
('m_countSubGroups','h'),
('m_placeGroupWindow','x'), # WARNING: unknown type ('x' is wrong)
('m_NetLoc','h'),
('m_locatMax','x'), # WARNING: unknown type ('x' is wrong)
('m_nameGroup','c')]
Type4_DefBlockSubgroup = [ # WARNING: untested
('m_nextBlock','l'),
('m_Number','h'),
('m_TypeOverlap','h'),
('m_Z_Order','h'),
('m_countChannel','h'),
('m_NetLoc','h'),
('m_location','x'), # WARNING: unknown type ('x' is wrong)
('m_bIsMaximized','h'),
('m_numChannel1','h'),
('m_numChannel2','h'),
('m_numChannel3','h'),
('m_numChannel4','h'),
('m_numChannel5','h'),
('m_numChannel6','h'),
('m_numChannel7','h'),
('m_numChannel8','h'),
('m_Name','c')]
Type5_DataBlockOneChannel = [
('m_numChannel','h')]
# WARNING: 'm_numChannel' (called 'm_Number' in 5.4.1 of [1]) is supposed
# to be uint according to 5.4.1 but it seems to be a short in the files
# (or should it be ushort ?)
# WARNING: In 5.1.1 page 121 of [1], they say "Note: 5 is used for demo
# purposes, 7 is used for real data", but looking at some real datafiles,
# it seems that block of type 5 are also used for real data...
Type7_DataBlockMultipleChannels = [ # WARNING: unfinished
('m_lenHead', 'h'), # WARNING: unknown true type
('FINT','h')]
# WARNING: there should be data after...
TypeP_DefBlockPeriStimHist = [ # WARNING: untested
('m_Number_Chan','h'),
('m_Position','x'), # WARNING: unknown type ('x' is wrong)
('m_isStatVisible','h'),
('m_DurationSec','f'),
('m_Rows','i'),
('m_DurationSecPre','f'),
('m_Bins','i'),
('m_NoTrigger','h')]
TypeF_DefBlockFRTachogram = [ # WARNING: untested
('m_Number_Chan','h'),
('m_Position','x'), # WARNING: unknown type ('x' is wrong)
('m_isStatVisible','h'),
('m_DurationSec','f'),
('m_AutoManualScale','i'),
('m_Max','i')]
TypeR_DefBlockRaster = [ # WARNING: untested
('m_Number_Chan','h'),
('m_Position','x'), # WARNING: unknown type ('x' is wrong)
('m_isStatVisible','h'),
('m_DurationSec','f'),
('m_Rows','i'),
('m_NoTrigger','h')]
TypeI_DefBlockISIHist = [ # WARNING: untested
('m_Number_Chan','h'),
('m_Position','x'), # WARNING: unknown type ('x' is wrong)
('m_isStatVisible','h'),
('m_DurationSec','f'),
('m_Bins','i'),
('m_TypeScale','i')]
Type8_MarkerBlock = [ # WARNING: untested
('m_Number_Channel','h'),
('m_Time','l')] # WARNING: check what's the right type here.
# It seems that the size of time_t type depends on the system typedef,
# I put long here but I couldn't check if it is the right type
Type9_ScaleBlock = [ # WARNING: untested
('m_Number_Channel','h'),
('m_Scale','f')]
Type_Unknown = []
dict_header_type = {
'h' : TypeH_Header,
'0' : Type0_SetBoards,
'1' : Type1_Boards,
'2' : Type2_DefBlocksChannels,
'6' : Type6_DefBlockTrigger,
'3' : Type3_DefBlockGroup,
'4' : Type4_DefBlockSubgroup,
'5' : Type5_DataBlockOneChannel,
'7' : Type7_DataBlockMultipleChannels,
'P' : TypeP_DefBlockPeriStimHist,
'F' : TypeF_DefBlockFRTachogram,
'R' : TypeR_DefBlockRaster,
'I' : TypeI_DefBlockISIHist,
'8' : Type8_MarkerBlock,
'9' : Type9_ScaleBlock
}
class HeaderReader():
def __init__(self,fid ,description ):
self.fid = fid
self.description = description
def read_f(self, offset =None):
if offset is not None :
self.fid.seek(offset)
d = { }
for key, fmt in self.description :
fmt = '<' + fmt # ensures use of standard sizes
buf = self.fid.read(struct.calcsize(fmt))
if len(buf) != struct.calcsize(fmt) : return None
val = list(struct.unpack(fmt , buf))
for i, ival in enumerate(val):
if hasattr(ival, 'split'):
val[i] = ival.split('\x00', 1)[0]
if len(val) == 1:
val = val[0]
d[key] = val
return d
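# Illustrative usage sketch (assumption: 'File_AlphaOmega_1.map' is a valid
# .map file); this mirrors what read_block() does for the main header:
#   fid = open('File_AlphaOmega_1.map', 'rb')
#   m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
#   header = HeaderReader(fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
#   fid.close()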
|
|
import tensorflow as tf
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import sys
# # Vanilla RNN class and functions
class RNN_cell(object):
"""
RNN cell object which takes 3 arguments for initialization.
input_size = Input Vector size
hidden_layer_size = Hidden layer size
target_size = Output vector size
"""
def __init__(self, input_size, hidden_layer_size, target_size):
# Initialization of given values
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.target_size = target_size
# Weights and Bias for input and hidden tensor
self.Wx = tf.Variable(tf.zeros(
[self.input_size, self.hidden_layer_size]))
self.Wh = tf.Variable(tf.zeros(
[self.hidden_layer_size, self.hidden_layer_size]))
self.bi = tf.Variable(tf.zeros([self.hidden_layer_size]))
# Weights for output layers
self.Wo = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size, self.target_size], mean=0, stddev=.01))
self.bo = tf.Variable(tf.truncated_normal(
[self.target_size], mean=0, stddev=.01))
# Placeholder for input vector with shape[batch, seq, embeddings]
self._inputs = tf.placeholder(tf.float32,
shape=[None, None, self.input_size],
name='inputs')
# Processing inputs to work with scan function
self.processed_input = process_batch_input_for_RNN(self._inputs)
'''
Initial hidden state's shape is [1,self.hidden_layer_size]
At the first time step, we do a dot product with the weights to
get the shape of [batch_size, self.hidden_layer_size].
For this dot product TensorFlow uses broadcasting, but during
backpropagation a low-level error occurs.
So to solve the problem we need to initialize the initial
hidden state with size [batch_size, self.hidden_layer_size].
So here is a little hack: get an initial hidden state of zeros
with the same shape.
'''
self.initial_hidden = self._inputs[:, 0, :]
self.initial_hidden = tf.matmul(
self.initial_hidden, tf.zeros([input_size, hidden_layer_size]))
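# Shape note: self._inputs[:, 0, :] has shape [batch_size, input_size], so
# multiplying it by an all-zero [input_size, hidden_layer_size] matrix gives
# an all-zero initial hidden state of shape [batch_size, hidden_layer_size]
# without needing to know batch_size statically.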
# Function for vanilla RNN.
def vanilla_rnn(self, previous_hidden_state, x):
"""
This function takes previous hidden state and input and
outputs current hidden state.
"""
current_hidden_state = tf.tanh(
tf.matmul(previous_hidden_state, self.Wh) +
tf.matmul(x, self.Wx) + self.bi)
return current_hidden_state
# Function for getting all hidden state.
def get_states(self):
"""
Iterates through time/sequence to get all hidden states
"""
# Getting all hidden states through time
all_hidden_states = tf.scan(self.vanilla_rnn,
self.processed_input,
initializer=self.initial_hidden,
name='states')
return all_hidden_states
# Function to get output from a hidden layer
def get_output(self, hidden_state):
"""
This function takes hidden state and returns output
"""
output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo)
return output
# Function for getting all output layers
def get_outputs(self):
"""
Iterating through hidden states to get outputs for all timestamps
"""
all_hidden_states = self.get_states()
all_outputs = tf.map_fn(self.get_output, all_hidden_states)
return all_outputs
# Function to convert batch input data to use scan ops of tensorflow.
def process_batch_input_for_RNN(batch_input):
"""
Process a tensor of shape [batch, seq, embed] (e.g. [5, 3, 2]) into [seq, batch, embed] (e.g. [3, 5, 2])
"""
batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])
X = tf.transpose(batch_input_)
return X
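# Illustrative note: the two transposes above are equivalent to a single
# tf.transpose(batch_input, perm=[1, 0, 2]), i.e. swapping the batch and
# sequence axes so that tf.scan can iterate over time steps. For example
# (assumed shapes), an input of shape [5, 3, 2] ([batch, seq, embed]) becomes
# [3, 5, 2] ([seq, batch, embed]).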
# # Placeholder and initializers
hidden_layer_size = 110
input_size = 8
target_size = 10
y = tf.placeholder(tf.float32, shape=[None, target_size], name='inputs')
# # Models
# Initializing rnn object
rnn = RNN_cell(input_size, hidden_layer_size, target_size)
# Getting all outputs from rnn
outputs = rnn.get_outputs()
# Getting the final output by indexing the last time step
last_output = outputs[-1]
# As the RNN model outputs the final layer through a ReLU activation,
# softmax is used for the final output.
output = tf.nn.softmax(last_output)
# Computing the Cross Entropy loss
cross_entropy = -tf.reduce_sum(y * tf.log(output))
# Training with the Adam optimizer
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
# Calculation of correct prediction and accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(output, 1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32))) * 100
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
# Dataset Preparation
# Using the sklearn digits dataset.
digits = datasets.load_digits()
X = digits.images
Y_ = digits.target
# One hot encoding
Y = sess.run(tf.one_hot(indices=Y_, depth=target_size))
# Getting Train and test Dataset
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.22, random_state=42)
# Cutting for simple iteration
X_train = X_train[:1400]
y_train = y_train[:1400]
# Iterations to do training
for epoch in range(120):
start = 0
end = 100
for i in range(14):
X = X_train[start:end]
Y = y_train[start:end]
start = end
end = start + 100
sess.run(train_step, feed_dict={rnn._inputs: X, y: Y})
Loss = str(sess.run(cross_entropy, feed_dict={rnn._inputs: X, y: Y}))
Train_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_train, y: y_train}))
Test_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_test, y: y_test}))
sys.stdout.flush()
print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s" %
(epoch, Loss, Train_accuracy, Test_accuracy)),
sys.stdout.flush()
|
|
# -*- coding: utf-8 -*-
'''
Support for rpm
'''
# Import python libs
from __future__ import absolute_import
import logging
import os
import re
import datetime
# Import Salt libs
import salt.utils
import salt.utils.itertools
import salt.utils.decorators as decorators
import salt.utils.pkg.rpm
# pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves import zip
try:
import rpm
HAS_RPM = True
except ImportError:
HAS_RPM = False
try:
import rpmUtils.miscutils
HAS_RPMUTILS = True
except ImportError:
HAS_RPMUTILS = False
# pylint: enable=import-error,redefined-builtin
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'lowpkg'
def __virtual__():
'''
Confine this module to rpm based systems
'''
if not salt.utils.which('rpm'):
return (False, 'The rpm execution module failed to load: rpm binary is not in the path.')
try:
os_grain = __grains__['os'].lower()
os_family = __grains__['os_family'].lower()
except Exception:
return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.')
enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux')
if os_family in ['redhat', 'suse'] or os_grain in enabled:
return __virtualname__
return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems '
'or amazon, xcp, xenserver or VirtuozzoLinux.')
def bin_pkg_info(path, saltenv='base'):
'''
.. versionadded:: 2015.8.0
Parses RPM metadata and returns a dictionary of information about the
package (name, version, etc.).
path
Path to the file. Can either be an absolute path to a file on the
minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``).
If a salt fileserver URL is passed, the file will be cached to the
minion so that it can be examined.
saltenv : base
Salt fileserver environment from which to retrieve the package. Ignored
if ``path`` is a local file path on the minion.
CLI Example:
.. code-block:: bash
salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm
salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm
'''
# If the path is a valid protocol, pull it down using cp.cache_file
if __salt__['config.valid_fileproto'](path):
newpath = __salt__['cp.cache_file'](path, saltenv)
if not newpath:
raise CommandExecutionError(
'Unable to retrieve {0} from saltenv \'{1}\''
.format(path, saltenv)
)
path = newpath
else:
if not os.path.exists(path):
raise CommandExecutionError(
'{0} does not exist on minion'.format(path)
)
elif not os.path.isabs(path):
raise SaltInvocationError(
'{0} does not exist on minion'.format(path)
)
# REPOID is not a valid tag for the rpm command. Remove it and replace it
# with 'none'
queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none')
output = __salt__['cmd.run_stdout'](
['rpm', '-qp', '--queryformat', queryformat, path],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False
)
ret = {}
pkginfo = salt.utils.pkg.rpm.parse_pkginfo(
output,
osarch=__grains__['osarch']
)
for field in pkginfo._fields:
ret[field] = getattr(pkginfo, field)
return ret
def list_pkgs(*packages):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' lowpkg.list_pkgs
'''
pkgs = {}
cmd = ['rpm', '-q' if packages else '-qa',
'--queryformat', r'%{NAME} %{VERSION}\n']
if packages:
cmd.extend(packages)
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
if 'is not installed' in line:
continue
comps = line.split()
pkgs[comps[0]] = comps[1]
return pkgs
def verify(*packages, **kwargs):
'''
Runs an rpm -Va on a system, and returns the results in a dict
Files with an attribute of config, doc, ghost, license or readme in the
package header can be ignored using the ``ignore_types`` keyword argument
CLI Example:
.. code-block:: bash
salt '*' lowpkg.verify
salt '*' lowpkg.verify httpd
salt '*' lowpkg.verify httpd postfix
salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc']
'''
ftypes = {'c': 'config',
'd': 'doc',
'g': 'ghost',
'l': 'license',
'r': 'readme'}
ret = {}
ignore_types = kwargs.get('ignore_types', [])
if packages:
cmd = ['rpm', '-V']
# Can't concatenate a tuple, must do a list.extend()
cmd.extend(packages)
else:
cmd = ['rpm', '-Va']
out = __salt__['cmd.run'](cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
fdict = {'mismatch': []}
if 'missing' in line:
line = ' ' + line
fdict['missing'] = True
del fdict['mismatch']
fname = line[13:]
if line[11:12] in ftypes:
fdict['type'] = ftypes[line[11:12]]
if 'type' not in fdict or fdict['type'] not in ignore_types:
if line[0:1] == 'S':
fdict['mismatch'].append('size')
if line[1:2] == 'M':
fdict['mismatch'].append('mode')
if line[2:3] == '5':
fdict['mismatch'].append('md5sum')
if line[3:4] == 'D':
fdict['mismatch'].append('device major/minor number')
if line[4:5] == 'L':
fdict['mismatch'].append('readlink path')
if line[5:6] == 'U':
fdict['mismatch'].append('user')
if line[6:7] == 'G':
fdict['mismatch'].append('group')
if line[7:8] == 'T':
fdict['mismatch'].append('mtime')
if line[8:9] == 'P':
fdict['mismatch'].append('capabilities')
ret[fname] = fdict
return ret
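# Illustrative note (hypothetical sample line, not from a real run): an
# 'rpm -V' line such as
#   S.5....T.  c /etc/httpd/conf/httpd.conf
# carries the nine attribute flags in columns 0-8, the file type character in
# column 11 and the file name from column 13 on, so verify() would map it to
#   {'/etc/httpd/conf/httpd.conf': {'type': 'config',
#                                   'mismatch': ['size', 'md5sum', 'mtime']}}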
def modified(*packages, **flags):
'''
List the modified files that belong to a package. Not specifying any packages
will return a list of _all_ modified files on the system's RPM database.
.. versionadded:: 2015.5.0
CLI examples:
.. code-block:: bash
salt '*' lowpkg.modified httpd
salt '*' lowpkg.modified httpd postfix
salt '*' lowpkg.modified
'''
ret = __salt__['cmd.run_all'](
['rpm', '-Va'] + list(packages),
output_loglevel='trace',
python_shell=False)
data = {}
# If verification produces output, it means it failed and the return
# code will be 1. We are only interested in return codes bigger than 1.
if ret['retcode'] > 1:
del ret['stdout']
return ret
elif not ret['retcode']:
return data
ptrn = re.compile(r"\s+")
changes = cfg = f_name = None
for f_info in salt.utils.itertools.split(ret['stdout'], '\n'):
f_info = ptrn.split(f_info)
if len(f_info) == 3: # Config file
changes, cfg, f_name = f_info
else:
changes, f_name = f_info
cfg = None
keys = ['size', 'mode', 'checksum', 'device', 'symlink',
'owner', 'group', 'time', 'capabilities']
changes = list(changes)
if len(changes) == 8: # Older RPMs do not support capabilities
changes.append('.')
stats = []
for k, v in zip(keys, changes):
if v != '.':
stats.append(k)
if cfg is not None:
stats.append('config')
data[f_name] = stats
if not flags:
return data
# Filtering
filtered_data = {}
for f_name, stats in data.items():
include = True
for param, pval in flags.items():
if param.startswith("_"):
continue
if (not pval and param in stats) or \
(pval and param not in stats):
include = False
break
if include:
filtered_data[f_name] = stats
return filtered_data
def file_list(*packages):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's rpm database (not generally
recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
'''
if not packages:
cmd = ['rpm', '-qla']
else:
cmd = ['rpm', '-ql']
# Can't concatenate a tuple, must do a list.extend()
cmd.extend(packages)
ret = __salt__['cmd.run'](
cmd,
output_loglevel='trace',
python_shell=False).splitlines()
return {'errors': [], 'files': ret}
def file_dict(*packages):
'''
List the files that belong to a package, sorted by group. Not specifying
any packages will return a list of _every_ file on the system's rpm
database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_dict httpd
salt '*' lowpkg.file_dict httpd postfix
salt '*' lowpkg.file_dict
'''
errors = []
ret = {}
pkgs = {}
cmd = ['rpm', '-q' if packages else '-qa',
'--queryformat', r'%{NAME} %{VERSION}\n']
if packages:
cmd.extend(packages)
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
if 'is not installed' in line:
errors.append(line)
continue
comps = line.split()
pkgs[comps[0]] = {'version': comps[1]}
for pkg in pkgs:
files = []
cmd = ['rpm', '-ql', pkg]
out = __salt__['cmd.run'](
['rpm', '-ql', pkg],
output_loglevel='trace',
python_shell=False)
ret[pkg] = out.splitlines()
return {'errors': errors, 'packages': ret}
def owner(*paths):
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. If a single path is passed, a string will be returned,
and if multiple paths are passed, a dictionary of file/package name pairs
will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.owner /usr/bin/apachectl
salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
if not paths:
return ''
ret = {}
for path in paths:
cmd = ['rpm', '-qf', '--queryformat', '%{name}', path]
ret[path] = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
if 'not owned' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return list(ret.values())[0]
return ret
@decorators.which('rpm2cpio')
@decorators.which('cpio')
@decorators.which('diff')
def diff(package, path):
'''
Return a formatted diff between current file and original in a package.
NOTE: this function includes all files (configuration and not), but does
not work on binary content.
:param package: The name of the package
:param path: Full path to the installed file
:return: Difference or empty string. For binary files only a notification.
CLI example:
.. code-block:: bash
salt '*' lowpkg.diff apache2 /etc/apache2/httpd.conf
'''
cmd = "rpm2cpio {0} " \
"| cpio -i --quiet --to-stdout .{1} " \
"| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}"
res = __salt__['cmd.shell'](cmd.format(package, path),
output_loglevel='trace')
if res and res.startswith('Binary file'):
return 'File \'{0}\' is binary and its content has been ' \
'modified.'.format(path)
return res
def info(*packages, **attr):
'''
Return detailed summary information for the given package(s).
If no packages are specified, all packages will be returned.
:param packages:
:param attr:
Comma-separated package attributes. If no 'attr' is specified, all available attributes are returned.
Valid attributes are:
version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
:return:
CLI example:
.. code-block:: bash
salt '*' lowpkg.info apache2 bash
salt '*' lowpkg.info apache2 bash attr=version
salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size
'''
# LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't
# available, then we can just use SIZE for older versions. See Issue #31366.
rpm_tags = __salt__['cmd.run_stdout'](
['rpm', '--querytags'],
python_shell=False).splitlines()
if 'LONGSIZE' in rpm_tags:
size_tag = '%{LONGSIZE}'
else:
size_tag = '%{SIZE}'
cmd = packages and "rpm -q {0}".format(' '.join(packages)) or "rpm -qa"
# Construct query format
attr_map = {
"name": "name: %{NAME}\\n",
"relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n",
"version": "version: %{VERSION}\\n",
"vendor": "vendor: %{VENDOR}\\n",
"release": "release: %{RELEASE}\\n",
"epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|",
"build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n",
"build_date": "build_date: %{BUILDTIME}\\n",
"install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n",
"install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n",
"build_host": "build_host: %{BUILDHOST}\\n",
"group": "group: %{GROUP}\\n",
"source_rpm": "source_rpm: %{SOURCERPM}\\n",
"size": "size: " + size_tag + "\\n",
"arch": "arch: %{ARCH}\\n",
"license": "%|LICENSE?{license: %{LICENSE}\\n}|",
"signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:"
"{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n",
"packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|",
"url": "%|URL?{url: %{URL}\\n}|",
"summary": "summary: %{SUMMARY}\\n",
"description": "description:\\n%{DESCRIPTION}\\n",
"edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n",
}
attr = attr.get('attr', None) and attr['attr'].split(",") or None
query = list()
if attr:
for attr_k in attr:
if attr_k in attr_map and attr_k != 'description':
query.append(attr_map[attr_k])
if not query:
raise CommandExecutionError('No valid attributes found.')
if 'name' not in attr:
attr.append('name')
query.append(attr_map['name'])
if 'edition' not in attr:
attr.append('edition')
query.append(attr_map['edition'])
else:
for attr_k, attr_v in attr_map.iteritems():
if attr_k != 'description':
query.append(attr_v)
if attr and 'description' in attr or not attr:
query.append(attr_map['description'])
query.append("-----\\n")
call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))),
output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += (call['stderr'] or call['stdout'])
raise CommandExecutionError('{0}'.format(comment))
elif 'error' in call['stderr']:
raise CommandExecutionError(call['stderr'])
else:
out = call['stdout']
_ret = list()
for pkg_info in re.split(r"----*", out):
pkg_info = pkg_info.strip()
if not pkg_info:
continue
pkg_info = pkg_info.split(os.linesep)
if pkg_info[-1].lower().startswith('distribution'):
pkg_info = pkg_info[:-1]
pkg_data = dict()
pkg_name = None
descr_marker = False
descr = list()
for line in pkg_info:
if descr_marker:
descr.append(line)
continue
line = [item.strip() for item in line.split(':', 1)]
if len(line) != 2:
continue
key, value = line
if key == 'description':
descr_marker = True
continue
if key == 'name':
pkg_name = value
# Convert Unix ticks into ISO time format
if key in ['build_date', 'install_date']:
try:
pkg_data[key] = datetime.datetime.fromtimestamp(int(value)).isoformat() + "Z"
except ValueError:
log.warning('Could not convert "{0}" into Unix time'.format(value))
continue
# Convert Unix ticks into an Integer
if key in ['build_date_time_t', 'install_date_time_t']:
try:
pkg_data[key] = int(value)
except ValueError:
log.warning('Could not convert "{0}" into Unix time'.format(value))
continue
if key not in ['description', 'name'] and value:
pkg_data[key] = value
if attr and 'description' in attr or not attr:
pkg_data['description'] = os.linesep.join(descr)
if pkg_name:
pkg_data['name'] = pkg_name
_ret.append(pkg_data)
# Force-sort package data by version,
# pick only latest versions
# (in case multiple packages installed, e.g. kernel)
ret = dict()
for pkg_data in reversed(sorted(_ret, cmp=lambda a_vrs, b_vrs: version_cmp(a_vrs['edition'], b_vrs['edition']))):
pkg_name = pkg_data.pop('name')
if pkg_name not in ret:
ret[pkg_name] = pkg_data.copy()
del ret[pkg_name]['edition']
return ret
def version_cmp(ver1, ver2, ignore_epoch=False):
'''
.. versionadded:: 2015.8.9
Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if
ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem
making the comparison.
ignore_epoch : False
Set to ``True`` to ignore the epoch when comparing versions
.. versionadded:: 2015.8.10,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
'''
normalize = lambda x: str(x).split(':', 1)[-1] if ignore_epoch else str(x)
ver1 = normalize(ver1)
ver2 = normalize(ver2)
try:
cmp_func = None
if HAS_RPM:
try:
cmp_func = rpm.labelCompare
except AttributeError:
# Catches corner case where someone has a module named "rpm" in
# their pythonpath.
log.debug(
'rpm module imported, but it does not have the '
'labelCompare function. Not using rpm.labelCompare for '
'version comparison.'
)
if cmp_func is None and HAS_RPMUTILS:
try:
cmp_func = rpmUtils.miscutils.compareEVR
except AttributeError:
log.debug('rpmUtils.miscutils.compareEVR is not available')
if cmp_func is None:
if salt.utils.which('rpmdev-vercmp'):
# rpmdev-vercmp always uses epochs, even when zero
def _ensure_epoch(ver):
def _prepend(ver):
return '0:{0}'.format(ver)
try:
if ':' not in ver:
return _prepend(ver)
except TypeError:
return _prepend(ver)
return ver
ver1 = _ensure_epoch(ver1)
ver2 = _ensure_epoch(ver2)
result = __salt__['cmd.run'](['rpmdev-vercmp', ver1, ver2],
python_shell=False,
ignore_retcode=True).strip()
if result.endswith('equal'):
return 0
elif 'is newer' in result:
newer_version = result.split()[0]
if newer_version == ver1:
return 1
elif newer_version == ver2:
return -1
log.warning(
'Failed to interpret results of rpmdev-vercmp output: %s',
result
)
else:
# We'll need to fall back to salt.utils.version_cmp()
log.warning(
'rpmdevtools is not installed, please install it for '
'more accurate version comparisons'
)
else:
# If one EVR is missing a release but not the other and they
# otherwise would be equal, ignore the release. This can happen if
# e.g. you are checking if a package version 3.2 is satisfied by
# 3.2-1.
(ver1_e, ver1_v, ver1_r) = salt.utils.str_version_to_evr(ver1)
(ver2_e, ver2_v, ver2_r) = salt.utils.str_version_to_evr(ver2)
if not ver1_r or not ver2_r:
ver1_r = ver2_r = ''
cmp_result = cmp_func((ver1_e, ver1_v, ver1_r),
(ver2_e, ver2_v, ver2_r))
if cmp_result not in (-1, 0, 1):
raise CommandExecutionError(
'Comparison result \'{0}\' is invalid'.format(cmp_result)
)
return cmp_result
except Exception as exc:
log.warning(
'Failed to compare version \'%s\' to \'%s\' using RPM: %s',
ver1, ver2, exc
)
# We would already have normalized the versions at the beginning of this
# function if ignore_epoch=True, so avoid unnecessary work and just pass
# False for this value.
return salt.utils.version_cmp(ver1, ver2, ignore_epoch=False)
def checksum(*paths):
'''
Return whether the signature of an RPM file is valid.
CLI Example:
.. code-block:: bash
salt '*' lowpkg.checksum /path/to/package1.rpm
salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm
'''
ret = dict()
if not paths:
raise CommandExecutionError("No package files has been specified.")
for package_file in paths:
ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and
not __salt__['cmd.retcode'](["rpm", "-K", "--quiet", package_file],
ignore_retcode=True,
output_loglevel='trace',
python_shell=False))
return ret
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Third Party Stuff
from django import forms
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from pagedown.widgets import PagedownWidget
# Junction Stuff
from junction.base.constants import (
ConferenceSettingConstants,
ProposalReviewerComment,
ProposalReviewStatus,
ProposalStatus,
ProposalTargetAudience,
ProposalVotesFilter
)
from junction.proposals.models import ProposalSection, ProposalSectionReviewerVoteValue, ProposalType
def _get_proposal_section_choices(conference, action="edit"):
if action == "create":
return [(str(cps.id), cps.name)
for cps in ProposalSection.objects.filter(
conferences=conference)]
else:
return [(str(cps.id), cps.name)
for cps in ProposalSection.objects.filter(
conferences=conference)]
def _get_proposal_type_choices(conference, action='edit'):
if action == "create":
return [(str(cpt.id), cpt.name)
for cpt in ProposalType.objects.filter(
conferences=conference, end_date__gt=now())]
else:
return [(str(cpt.id), cpt.name)
for cpt in ProposalType.objects.filter(
conferences=conference)]
def _get_proposal_section_reviewer_vote_choices(conference):
allow_plus_zero_vote = ConferenceSettingConstants.ALLOW_PLUS_ZERO_REVIEWER_VOTE
plus_zero_vote_setting = conference.conferencesetting_set.filter(
name=allow_plus_zero_vote['name']).first()
if plus_zero_vote_setting:
plus_zero_vote_setting_value = plus_zero_vote_setting.value
else:
plus_zero_vote_setting_value = True
values = []
for i in ProposalSectionReviewerVoteValue.objects.all():
if i.vote_value == 0 and not plus_zero_vote_setting_value:
continue
values.append((i.vote_value, '{} ({})'.format(
i.description, i.vote_value)))
return values
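# Illustrative return value (the descriptions below are hypothetical):
# something like [(2, 'Strong accept (2)'), (1, 'Accept (1)'),
# (0, 'Neutral (0)'), (-1, 'Reject (-1)')]; the 0-valued entry is dropped
# when the conference setting disables +0 reviewer votes.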
class HorizRadioRenderer(forms.RadioSelect.renderer):
"""
This overrides widget method to put radio buttons horizontally instead of vertically.
"""
def render(self):
"""Outputs radios"""
return mark_safe(u'\n'.join([u'%s\n' % w for w in self]))
class ProposalForm(forms.Form):
'''
Used for create/edit
'''
title = forms.CharField(min_length=10,
help_text="Title of the proposal, no buzz words!",
widget=forms.TextInput(attrs={'class': 'charfield'}))
description = forms.CharField(widget=PagedownWidget(show_preview=True),
help_text=("Describe your proposal with clear objective in simple sentence."
" Keep it short and simple."))
target_audience = forms.ChoiceField(
choices=ProposalTargetAudience.CHOICES,
widget=forms.Select(attrs={'class': 'dropdown'}))
status = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'dropdown'}),
choices=ProposalStatus.CHOICES,
help_text=("If you choose DRAFT people can't the see the session in the list."
" Make the proposal PUBLIC when you're done with editing the session."))
proposal_type = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'dropdown'}))
proposal_section = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'dropdown'}))
# Additional Content
prerequisites = forms.CharField(
widget=PagedownWidget(show_preview=True), required=False,
help_text="What should the participants know before attending your session?")
content_urls = forms.CharField(
widget=PagedownWidget(show_preview=True), required=False,
help_text="Links to your session like GitHub repo, Blog, Slideshare etc ...")
speaker_info = forms.CharField(
widget=PagedownWidget(show_preview=True), required=False,
help_text="Say something about yourself, work etc...")
speaker_links = forms.CharField(
widget=PagedownWidget(show_preview=True), required=False,
help_text="Links to your previous work like Blog, Open Source Contributions etc ...")
def __init__(self, conference, action="edit", *args, **kwargs):
super(ProposalForm, self).__init__(*args, **kwargs)
self.fields['proposal_section'].choices = _get_proposal_section_choices(
conference, action=action)
self.fields['proposal_type'].choices = _get_proposal_type_choices(
conference, action=action)
@classmethod
def populate_form_for_update(cls, proposal):
form = ProposalForm(proposal.conference,
initial={'title': proposal.title,
'description': proposal.description,
'target_audience': proposal.target_audience,
'prerequisites': proposal.prerequisites,
'content_urls': proposal.content_urls,
'speaker_info': proposal.speaker_info,
'speaker_links': proposal.speaker_links,
'status': proposal.status,
'proposal_section': proposal.proposal_section.pk,
'proposal_type': proposal.proposal_type.pk, })
return form
class ProposalCommentForm(forms.Form):
'''
Used to add comments
'''
comment = forms.CharField(widget=PagedownWidget(show_preview=True))
private = forms.BooleanField(required=False, widget=forms.HiddenInput())
reviewer = forms.BooleanField(required=False, widget=forms.HiddenInput())
class ProposalReviewForm(forms.Form):
"""
Used to review the proposal.
"""
review_status = forms.ChoiceField(
choices=ProposalReviewStatus.CHOICES,
widget=forms.RadioSelect()
)
class ProposalReviewerVoteForm(forms.Form):
"""
Used by ProposalSectionReviewers to vote on proposals.
"""
vote_value = forms.ChoiceField(widget=forms.RadioSelect())
comment = forms.CharField(
widget=forms.Textarea(attrs={'minlength': '30'}),
help_text="Leave a comment justifying your vote.",
)
def __init__(self, *args, **kwargs):
conference = kwargs.pop('conference', None)
super(ProposalReviewerVoteForm, self).__init__(*args, **kwargs)
choices = _get_proposal_section_reviewer_vote_choices(conference)
self.fields['vote_value'].choices = choices
class ProposalTypesChoices(forms.Form):
"""
Base proposal form with proposal sections & types.
"""
proposal_section = forms.ChoiceField(widget=forms.Select(
attrs={'class': 'dropdown'}))
proposal_type = forms.ChoiceField(widget=forms.Select(
attrs={'class': 'dropdown'}))
def __init__(self, conference, *args, **kwargs):
super(ProposalTypesChoices, self).__init__(*args, **kwargs)
self.fields['proposal_section'].choices = _get_proposal_section_choices(
conference)
self.fields['proposal_type'].choices = _get_proposal_type_choices(
conference)
class ProposalsToReviewForm(ProposalTypesChoices):
"""
Used to filter proposals
"""
reviewer_comment = forms.ChoiceField(widget=forms.Select(attrs={'class': 'dropdown'}))
def __init__(self, conference, proposal_sections, *args, **kwargs):
super(ProposalsToReviewForm, self).__init__(conference, *args, **kwargs)
ps_choices = [(str(ps.id), ps.name) for ps in proposal_sections]
self.fields['reviewer_comment'].choices = ProposalReviewerComment.CHOICES
self.fields['proposal_section'].choices = ps_choices
for name, field in list(self.fields.items()):
field.choices.insert(0, ('all', 'All'))
class ProposalVotesFilterForm(ProposalTypesChoices):
"""
Form to filter proposals based on votes and review_status.
"""
votes = forms.ChoiceField(widget=forms.Select(attrs={'class': 'dropdown votes'}))
review_status = forms.ChoiceField(widget=forms.Select(attrs={'class': 'dropdown'}))
def __init__(self, conference, *args, **kwargs):
super(ProposalVotesFilterForm, self).__init__(conference, *args, **kwargs)
self.fields['votes'].choices = ProposalVotesFilter.CHOICES
self.fields['review_status'].choices = ProposalReviewStatus.CHOICES
for name, field in list(self.fields.items()):
field.choices.insert(0, ('all', 'All'))
|
|
import time
from hashlib import md5
import requests
from django.conf import settings
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from rest_framework.views import APIView
from rest_framework_extensions.cache.decorators import cache_response
from sputniktask.apps.accounts.authentication import ExpiringTokenAuthentication
from sputniktask.apps.marvel.serializers import ComicsListSerializer, OffsetPaginationSerializer
class MarvelAPIMixIn(APIView):
authentication_classes = (ExpiringTokenAuthentication,)
permission_classes = (IsAuthenticated,)
api_method = None
default_params = None
serializer_class = OffsetPaginationSerializer
@staticmethod
def get_signature():
ts = str(time.time())
return {
'ts': ts,
'hash': md5(ts + settings.MARVEL_SECRET_KEY + settings.MARVEL_PUBLIC_KEY).hexdigest(),
'apikey': settings.MARVEL_PUBLIC_KEY,
}
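# NOTE (assumption about the runtime): hashlib.md5() requires bytes on
# Python 3, so the concatenated string would need an .encode('utf-8') there;
# the code above works as written on Python 2. The scheme itself follows the
# Marvel API convention hash = md5(ts + privateKey + publicKey).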
def call_api(self, method, params):
query_params = dict(params, **self.get_signature())
return requests.get(settings.MARVEL_API_URL + method, params=query_params)
class ComicsListWithTitle(MarvelAPIMixIn):
"""
Get the list of comics with a specific title.
Response object and errors are described at
[developer.marvel.com](http://developer.marvel.com/docs#!/public/getComicsCollection_get_6)
"""
api_method = 'comics'
default_params = {
'format': 'comic',
'formatType': 'comic',
'orderBy': 'focDate',
'limit': 10,
'offset': 0,
}
serializer_class = ComicsListSerializer
@cache_response()
def get(self, request, *args, **kwargs):
"""
---
parameters:
- name: title
description: Required parameter.
required: true
type: string
paramType: query
- name: limit
description: Limit the result set to the specified number of resources.
required: false
type: int
paramType: query
- name: offset
description: Skip the specified number of resources in the result set.
required: false
type: int
paramType: query
"""
serializer = self.serializer_class(data=request.query_params)
if serializer.is_valid():
params = self.default_params.copy()
params.update(**serializer.data)
response = self.call_api(self.api_method, params)
return Response(response.json(), status=response.status_code)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class HeroEventsList(MarvelAPIMixIn):
"""
Get the list of events related to the provided character.
Response object and errors are described at
[developer.marvel.com](http://developer.marvel.com/docs#!/public/getEventsCollection_get_18)
"""
api_method = 'events'
default_params = {
'characters': None,
'limit': 10,
'offset': 0
}
@cache_response()
def get(self, request, *args, **kwargs):
"""
---
parameters:
- name: limit
description: Limit the result set to the specified number of resources.
required: false
type: int
paramType: query
- name: offset
description: Skip the specified number of resources in the result set.
required: false
type: int
paramType: query
"""
serializer = self.serializer_class(data=request.query_params)
if serializer.is_valid():
params = self.default_params.copy()
params.update(characters=kwargs['hero_id'], **serializer.data)
response = self.call_api(self.api_method, params)
return Response(response.json(), status=response.status_code)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class ComicsListWithSimilarAuthors(MarvelAPIMixIn):
"""
Get the list of comics which have similar creators to the provided one.
Response object and errors are described at
[developer.marvel.com](http://developer.marvel.com/docs#!/public/getComicsCollection_get_6)
"""
api_method = 'comics'
default_params = {
'creators': '',
'format': 'comic',
'formatType': 'comic',
'orderBy': 'focDate',
'limit': 10,
'offset': 0
}
@cache_response()
def get(self, request, *args, **kwargs):
"""
---
parameters:
- name: limit
description: Limit the result set to the specified number of resources.
required: false
type: int
paramType: query
- name: offset
description: Skip the specified number of resources in the result set.
required: false
type: int
paramType: query
"""
serializer = self.serializer_class(data=request.query_params)
if serializer.is_valid():
response = self.call_api('comics/{}/creators'.format(kwargs['comic_id']), {'limit': 100})
if response.status_code == 200:
authors_list = [str(item['id']) for item in response.json()['data']['results']]
if authors_list:
params = self.default_params.copy()
creators = '.'.join(authors_list)
params.update(creators=creators, **serializer.data)
response = self.call_api(self.api_method, params)
else:
return Response({'detail': 'Seems like there is no such comic in the Marvel database.'},
status=HTTP_400_BAD_REQUEST)
return Response(response.json(), status=response.status_code)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class ComicsListWithSimilarCharacters(MarvelAPIMixIn):
"""
Get the list of comics which have similar characters to the provided one.
Response object and errors are described at
[developer.marvel.com](http://developer.marvel.com/docs#!/public/getComicsCollection_get_6)
"""
api_method = 'comics'
default_params = {
'characters': '',
'format': 'comic',
'formatType': 'comic',
'orderBy': 'focDate',
'limit': 10,
'offset': 0
}
@cache_response()
def get(self, request, *args, **kwargs):
"""
---
parameters:
- name: limit
description: Limit the result set to the specified number of resources.
required: false
type: int
paramType: query
- name: offset
description: Skip the specified number of resources in the result set.
required: false
type: int
paramType: query
"""
serializer = self.serializer_class(data=request.query_params)
if serializer.is_valid():
response = self.call_api('comics/{}/characters'.format(kwargs['comic_id']), {'limit': 100})
if response.status_code == 200:
hero_list = [str(item['id']) for item in response.json()['data']['results']]
if hero_list:
params = self.default_params.copy()
characters = '.'.join(hero_list)
params.update(characters=characters, **serializer.data)
response = self.call_api(self.api_method, params)
else:
                    return Response({'detail': 'Seems like there is no such comic in the Marvel database.'},
status=HTTP_400_BAD_REQUEST)
return Response(response.json(), status=response.status_code)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class ComicsListFromSameSeries(MarvelAPIMixIn):
"""
    Get the list of comics from the same series as the provided one.
    The response object and errors are described at
[developer.marvel.com](http://developer.marvel.com/docs#!/public/getComicsCollection_get_6)
"""
api_method = 'comics'
default_params = {
'series': '',
'format': 'comic',
'formatType': 'comic',
'orderBy': 'focDate',
'limit': 10,
'offset': 0
}
@cache_response()
def get(self, request, *args, **kwargs):
"""
---
parameters:
- name: limit
description: Limit the result set to the specified number of resources.
required: false
type: int
paramType: query
- name: offset
description: Skip the specified number of resources in the result set.
required: false
type: int
paramType: query
"""
serializer = self.serializer_class(data=request.query_params)
if serializer.is_valid():
response = self.call_api('series', {'limit': 1,
'comics': kwargs['comic_id']})
if response.status_code == 200:
series_list = [str(item['id']) for item in response.json()['data']['results']]
if series_list:
params = self.default_params.copy()
series = series_list[0]
params.update(series=series, **serializer.data)
response = self.call_api(self.api_method, params)
else:
                    return Response({'detail': 'Seems like there is no such comic in the Marvel database.'},
status=HTTP_400_BAD_REQUEST)
return Response(response.json(), status=response.status_code)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
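# All of the views above share the same request-handling pattern: copy the
# class-level default_params, then let the validated query parameters
# override them. A minimal sketch of that merge with plain dicts (the
# character id here is illustrative):
#
#   defaults = {'characters': None, 'limit': 10, 'offset': 0}
#   params = defaults.copy()
#   params.update(characters=1009610, **{'limit': 5})  # serializer.data
#   # -> {'characters': 1009610, 'limit': 5, 'offset': 0}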
|
|
from __future__ import division
import numpy as np
from pandas import Interval, Timestamp, Timedelta
import pandas.core.common as com
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
return Interval(0, 1)
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with tm.assert_raises_regex(TypeError, msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
@pytest.mark.parametrize('left, right, expected', [
(0, 5, 5),
(-2, 5.5, 7.5),
(10, 10, 0),
(10, np.inf, np.inf),
(-np.inf, -5, np.inf),
(-np.inf, np.inf, np.inf),
(Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),
(Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),
(Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),
(Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])
def test_length(self, left, right, expected):
# GH 18789
iv = Interval(left, right)
result = iv.length
assert result == expected
@pytest.mark.parametrize('left, right, expected', [
('2017-01-01', '2017-01-06', '5 days'),
('2017-01-01', '2017-01-01 12:00:00', '12 hours'),
('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),
('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])
@pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))
def test_length_timestamp(self, tz, left, right, expected):
# GH 18789
iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
result = iv.length
expected = Timedelta(expected)
assert result == expected
@pytest.mark.parametrize('left, right', [
('a', 'z'),
(('a', 'b'), ('c', 'd')),
(list('AB'), list('ab')),
(Interval(0, 1), Interval(1, 2))])
def test_length_errors(self, left, right):
# GH 18789
iv = Interval(left, right)
msg = 'cannot compute length between .* and .*'
with tm.assert_raises_regex(TypeError, msg):
iv.length
def test_math_add(self, interval):
expected = Interval(1, 2)
actual = interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + interval
assert expected == actual
actual = interval
actual += 1
assert expected == actual
msg = r"unsupported operand type\(s\) for \+"
with tm.assert_raises_regex(TypeError, msg):
interval + Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval + 'foo'
def test_math_sub(self, interval):
expected = Interval(-1, 0)
actual = interval - 1
assert expected == actual
actual = interval
actual -= 1
assert expected == actual
msg = r"unsupported operand type\(s\) for -"
with tm.assert_raises_regex(TypeError, msg):
interval - Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval - 'foo'
def test_math_mult(self, interval):
expected = Interval(0, 2)
actual = interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * interval
assert expected == actual
actual = interval
actual *= 2
assert expected == actual
msg = r"unsupported operand type\(s\) for \*"
with tm.assert_raises_regex(TypeError, msg):
interval * Interval(1, 2)
msg = r"can\'t multiply sequence by non-int"
with tm.assert_raises_regex(TypeError, msg):
interval * 'foo'
def test_math_div(self, interval):
expected = Interval(0, 0.5)
actual = interval / 2.0
assert expected == actual
actual = interval
actual /= 2.0
assert expected == actual
msg = r"unsupported operand type\(s\) for /"
with tm.assert_raises_regex(TypeError, msg):
interval / Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
def test_constructor_errors(self):
msg = "invalid option for 'closed': foo"
with tm.assert_raises_regex(ValueError, msg):
Interval(0, 1, closed='foo')
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
Interval(1, 0)
@pytest.mark.parametrize('tz_left, tz_right', [
(None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp('2017-01-01', tz=tz_left)
right = Timestamp('2017-01-02', tz=tz_right)
error = TypeError if com._any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
|
|
"""
Support for Z-Wave lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.zwave/
"""
import logging
# Because we do not compile openzwave on CI
# pylint: disable=import-error
from threading import Timer
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, \
ATTR_RGB_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, \
SUPPORT_RGB_COLOR, DOMAIN, Light
from homeassistant.components import zwave
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.util.color import HASS_COLOR_MAX, HASS_COLOR_MIN, \
color_temperature_mired_to_kelvin, color_temperature_to_rgb, \
color_rgb_to_rgbw, color_rgbw_to_rgb
_LOGGER = logging.getLogger(__name__)
AEOTEC = 0x86
AEOTEC_ZW098_LED_BULB = 0x62
AEOTEC_ZW098_LED_BULB_LIGHT = (AEOTEC, AEOTEC_ZW098_LED_BULB)
LINEAR = 0x14f
LINEAR_WD500Z_DIMMER = 0x3034
LINEAR_WD500Z_DIMMER_LIGHT = (LINEAR, LINEAR_WD500Z_DIMMER)
GE = 0x63
GE_12724_DIMMER = 0x3031
GE_12724_DIMMER_LIGHT = (GE, GE_12724_DIMMER)
DRAGONTECH = 0x184
DRAGONTECH_PD100_DIMMER = 0x3032
DRAGONTECH_PD100_DIMMER_LIGHT = (DRAGONTECH, DRAGONTECH_PD100_DIMMER)
ACT = 0x01
ACT_ZDP100_DIMMER = 0x3030
ACT_ZDP100_DIMMER_LIGHT = (ACT, ACT_ZDP100_DIMMER)
HOMESEER = 0x0c
HOMESEER_WD100_DIMMER = 0x3034
HOMESEER_WD100_DIMMER_LIGHT = (HOMESEER, HOMESEER_WD100_DIMMER)
COLOR_CHANNEL_WARM_WHITE = 0x01
COLOR_CHANNEL_COLD_WHITE = 0x02
COLOR_CHANNEL_RED = 0x04
COLOR_CHANNEL_GREEN = 0x08
COLOR_CHANNEL_BLUE = 0x10
WORKAROUND_ZW098 = 'zw098'
WORKAROUND_DELAY = 'alt_delay'
DEVICE_MAPPINGS = {
AEOTEC_ZW098_LED_BULB_LIGHT: WORKAROUND_ZW098,
LINEAR_WD500Z_DIMMER_LIGHT: WORKAROUND_DELAY,
GE_12724_DIMMER_LIGHT: WORKAROUND_DELAY,
DRAGONTECH_PD100_DIMMER_LIGHT: WORKAROUND_DELAY,
ACT_ZDP100_DIMMER_LIGHT: WORKAROUND_DELAY,
HOMESEER_WD100_DIMMER_LIGHT: WORKAROUND_DELAY,
}
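# Lookup sketch for the mapping above: node ids are hex strings, so e.g.
# manufacturer_id '0086' and product_id '0062' become
# (int('0086', 16), int('0062', 16)) == (0x86, 0x62), which maps to
# WORKAROUND_ZW098.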
# Generate midpoint color temperatures for bulbs that have limited
# support for white light colors
TEMP_MID_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 2 + HASS_COLOR_MIN
TEMP_WARM_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 * 2 + HASS_COLOR_MIN
TEMP_COLD_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 + HASS_COLOR_MIN
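# With, for example, HASS_COLOR_MIN = 154 and HASS_COLOR_MAX = 500 mireds
# (the exact constants come from homeassistant.util.color), the formulas
# above give roughly:
#   TEMP_COLD_HASS = 346 / 3     + 154 ~= 269  (cold point)
#   TEMP_MID_HASS  = 346 / 2     + 154 ~= 327  (midpoint)
#   TEMP_WARM_HASS = 346 / 3 * 2 + 154 ~= 385  (warm point)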
SUPPORT_ZWAVE = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_RGB_COLOR
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Find and add Z-Wave lights."""
if discovery_info is None or zwave.NETWORK is None:
return
node = zwave.NETWORK.nodes[discovery_info[zwave.const.ATTR_NODE_ID]]
value = node.values[discovery_info[zwave.const.ATTR_VALUE_ID]]
if value.command_class != zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL:
return
if value.type != zwave.const.TYPE_BYTE:
return
if value.genre != zwave.const.GENRE_USER:
return
value.set_change_verified(False)
if node.has_command_class(zwave.const.COMMAND_CLASS_SWITCH_COLOR):
try:
add_devices([ZwaveColorLight(value)])
except ValueError as exception:
_LOGGER.warning(
"Error initializing as color bulb: %s "
"Initializing as standard dimmer.", exception)
add_devices([ZwaveDimmer(value)])
else:
add_devices([ZwaveDimmer(value)])
def brightness_state(value):
"""Return the brightness and state."""
if value.data > 0:
return (value.data / 99) * 255, STATE_ON
else:
return 255, STATE_OFF
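# A minimal sketch of the scaling used above and in turn_on(): Z-Wave
# multilevel switches use levels in [0, 99] while Home Assistant brightness
# is in [0, 255], so a round trip is stable up to integer truncation:
#
#   hass = (50 / 99) * 255           # ~128.8
#   level = int((hass / 255) * 99)   # 50 again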
class ZwaveDimmer(zwave.ZWaveDeviceEntity, Light):
"""Representation of a Z-Wave dimmer."""
# pylint: disable=too-many-arguments
def __init__(self, value):
"""Initialize the light."""
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
zwave.ZWaveDeviceEntity.__init__(self, value, DOMAIN)
self._brightness = None
self._state = None
self._alt_delay = None
self._zw098 = None
# Enable appropriate workaround flags for our device
# Make sure that we have values for the key before converting to int
if (value.node.manufacturer_id.strip() and
value.node.product_id.strip()):
specific_sensor_key = (int(value.node.manufacturer_id, 16),
int(value.node.product_id, 16))
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZW098:
_LOGGER.debug("AEOTEC ZW098 workaround enabled")
self._zw098 = 1
elif DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_DELAY:
_LOGGER.debug("Dimmer delay workaround enabled for node:"
" %s", value.parent_id)
self._alt_delay = 1
self.update_properties()
# Used for value change event handling
self._refreshing = False
self._timer = None
dispatcher.connect(
self._value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def update_properties(self):
"""Update internal properties based on zwave values."""
# Brightness
self._brightness, self._state = brightness_state(self._value)
def _value_changed(self, value):
"""Called when a value has changed on the network."""
if self._value.value_id == value.value_id or \
self._value.node == value.node:
if self._refreshing:
self._refreshing = False
self.update_properties()
else:
def _refresh_value():
"""Used timer callback for delayed value refresh."""
self._refreshing = True
self._value.refresh()
                if self._timer is not None and self._timer.is_alive():
self._timer.cancel()
if self._alt_delay:
self._timer = Timer(5, _refresh_value)
else:
self._timer = Timer(2, _refresh_value)
self._timer.start()
self.update_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_ZWAVE
def turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
# Zwave multilevel switches use a range of [0, 99] to control
# brightness.
brightness = int((self._brightness / 255) * 99)
if self._value.node.set_dimmer(self._value.value_id, brightness):
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
if self._value.node.set_dimmer(self._value.value_id, 0):
self._state = STATE_OFF
def ct_to_rgb(temp):
"""Convert color temperature (mireds) to RGB."""
colorlist = list(
color_temperature_to_rgb(color_temperature_mired_to_kelvin(temp)))
return [int(val) for val in colorlist]
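# Mireds and kelvin are reciprocals: kelvin = 1_000_000 / mireds, so for
# example 250 mireds -> 4000 K and 153 mireds -> ~6536 K.
# color_temperature_to_rgb then approximates an RGB triple for that kelvin.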
class ZwaveColorLight(ZwaveDimmer):
"""Representation of a Z-Wave color changing light."""
def __init__(self, value):
"""Initialize the light."""
self._value_color = None
self._value_color_channels = None
self._color_channels = None
self._rgb = None
self._ct = None
        # Currently, Z-Wave nodes only expose one color element per node.
for value_color in value.node.get_rgbbulbs().values():
self._value_color = value_color
if self._value_color is None:
raise ValueError("No color command found.")
for value_color_channels in value.node.get_values(
class_id=zwave.const.COMMAND_CLASS_SWITCH_COLOR,
genre='System', type="Int").values():
self._value_color_channels = value_color_channels
if self._value_color_channels is None:
raise ValueError("Color Channels not found.")
super().__init__(value)
def update_properties(self):
"""Update internal properties based on zwave values."""
super().update_properties()
# Color Channels
self._color_channels = self._value_color_channels.data
# Color Data String
data = self._value_color.data
# RGB is always present in the openzwave color data string.
self._rgb = [
int(data[1:3], 16),
int(data[3:5], 16),
int(data[5:7], 16)]
# Parse remaining color channels. Openzwave appends white channels
# that are present.
index = 7
# Warm white
if self._color_channels & COLOR_CHANNEL_WARM_WHITE:
warm_white = int(data[index:index+2], 16)
index += 2
else:
warm_white = 0
# Cold white
if self._color_channels & COLOR_CHANNEL_COLD_WHITE:
cold_white = int(data[index:index+2], 16)
index += 2
else:
cold_white = 0
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if warm_white > 0:
self._ct = TEMP_WARM_HASS
self._rgb = ct_to_rgb(self._ct)
elif cold_white > 0:
self._ct = TEMP_COLD_HASS
self._rgb = ct_to_rgb(self._ct)
else:
# RGB color is being used. Just report midpoint.
self._ct = TEMP_MID_HASS
elif self._color_channels & COLOR_CHANNEL_WARM_WHITE:
self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=warm_white))
elif self._color_channels & COLOR_CHANNEL_COLD_WHITE:
self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=cold_white))
# If no rgb channels supported, report None.
if not (self._color_channels & COLOR_CHANNEL_RED or
self._color_channels & COLOR_CHANNEL_GREEN or
self._color_channels & COLOR_CHANNEL_BLUE):
self._rgb = None
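    # A sketch of the color string parsed above (channel values are
    # illustrative). openzwave reports '#' followed by hex bytes: RGB first,
    # then any white channels the device advertises, warm before cold. For
    # an RGBWW bulb reporting '#FF8000C020':
    #   rgb  = [0xFF, 0x80, 0x00]   # data[1:7]
    #   warm = 0xC0                 # data[7:9]
    #   cold = 0x20                 # data[9:11]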
@property
def rgb_color(self):
"""Return the rgb color."""
return self._rgb
@property
def color_temp(self):
"""Return the color temperature."""
return self._ct
def turn_on(self, **kwargs):
"""Turn the device on."""
rgbw = None
if ATTR_COLOR_TEMP in kwargs:
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if kwargs[ATTR_COLOR_TEMP] > TEMP_MID_HASS:
self._ct = TEMP_WARM_HASS
rgbw = b'#000000FF00'
else:
self._ct = TEMP_COLD_HASS
rgbw = b'#00000000FF'
elif ATTR_RGB_COLOR in kwargs:
self._rgb = kwargs[ATTR_RGB_COLOR]
if (not self._zw098 and (
self._color_channels & COLOR_CHANNEL_WARM_WHITE or
self._color_channels & COLOR_CHANNEL_COLD_WHITE)):
rgbw = b'#'
for colorval in color_rgb_to_rgbw(*self._rgb):
rgbw += format(colorval, '02x').encode('utf-8')
rgbw += b'00'
else:
rgbw = b'#'
for colorval in self._rgb:
rgbw += format(colorval, '02x').encode('utf-8')
rgbw += b'0000'
if rgbw is None:
_LOGGER.warning("rgbw string was not generated for turn_on")
else:
self._value_color.node.set_rgbw(self._value_color.value_id, rgbw)
super().turn_on(**kwargs)
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing that we handle merge conflicts properly"""
import os
from shutil import rmtree
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestMergeConflicts(TestBrokerCommand):
def test_100_addchangetest3sandbox(self):
self.successtest(["add", "sandbox", "--sandbox", "changetest3"])
def test_100_addchangetest4sandbox(self):
self.successtest(["add", "sandbox", "--sandbox", "changetest4"])
def test_100_addchangetargetdomain(self):
self.successtest(["add", "domain", "--domain", "changetarget"])
def test_110_trackchangetest4(self):
self.commandtest(["add_domain", "--domain", "changetest4-tracker",
"--track", "changetest4"])
def test_110_trackchangetarget(self):
self.commandtest(["add_domain", "--domain", "changetarget-tracker",
"--track", "changetarget"])
def test_120_makeconflictingchange(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest3")
template = self.find_template("aquilon", "archetype", "base",
sandbox="changetest3")
with open(template) as f:
contents = f.readlines()
contents.append("#Added by changetest3\n")
with open(template, 'w') as f:
f.writelines(contents)
self.gitcommand(["commit", "-a", "-m", "added changetest3 comment"],
cwd=sandboxdir)
sandboxdir = os.path.join(self.sandboxdir, "changetest4")
template = self.find_template("aquilon", "archetype", "base",
sandbox="changetest4")
with open(template) as f:
contents = f.readlines()
contents.append("#Added by changetest4\n")
with open(template, 'w') as f:
f.writelines(contents)
self.gitcommand(["commit", "-a", "-m", "added changetest4 comment"],
cwd=sandboxdir)
def test_121_publishchangetest3sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest3")
self.successtest(["publish", "--branch", "changetest3"],
env=self.gitenv(), cwd=sandboxdir)
def test_121_publishchangetest4sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest4")
self.successtest(["publish", "--branch", "changetest4"],
env=self.gitenv(), cwd=sandboxdir)
def test_122_deploychangetest3sandbox(self):
command = ["deploy", "--source", "changetest3", "--target", "changetarget"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain "
"changetarget...", command)
self.matchoutput(out, "Updating the checked out copy of domain "
"changetarget-tracker...", command)
template = self.find_template("aquilon", "archetype", "base",
domain="changetarget")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by changetest3\n")
def test_122_deploychangetest4sandbox(self):
command = "deploy --source changetest4 --target changetarget"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Automatic merge failed;", command)
repo = os.path.join(self.config.get("broker", "domainsdir"),
"changetarget")
self.check_git_merge_health(repo)
def test_123_prepchangetest4sandbox(self):
# Fix up the branch and get it ready for a successful put.
sandboxdir = os.path.join(self.sandboxdir, "changetest4")
self.gitcommand(["fetch"], cwd=sandboxdir)
command = ["merge", "origin/changetest3"]
out, _ = self.gitcommand_expectfailure(command, cwd=sandboxdir)
self.matchoutput(out, "Automatic merge failed;", command)
# The file will now have merge conflicts. Cheat by grabbing
# the copy from changetest3.
for ext in [".tpl", ".pan"]:
base = os.path.join("aquilon", "archetype", "base" + ext)
if os.path.exists(os.path.join(sandboxdir, base)):
break
self.gitcommand(["checkout", "origin/changetest3", base],
cwd=sandboxdir)
template = os.path.join(sandboxdir, base)
with open(template) as f:
contents = f.readlines()
contents.append("#Added by changetest4\n")
with open(template, 'w') as f:
f.writelines(contents)
self.gitcommand(["add", base], cwd=sandboxdir)
self.gitcommand(["commit", "-a", "-m", "added changetest4 comment"],
cwd=sandboxdir)
def test_124_prepchangetest3conflict(self):
# Model someone doing a put of a conflicting change by forgetting
# that *we* put the conflicting change. :) Having two users doing
# two different put operations on a sandbox is hard in this
# framework. Instead, we fake it by rewinding the sandbox to the
# previous commit!
sandboxdir = os.path.join(self.sandboxdir, "changetest3")
self.gitcommand(["reset", "--hard", "HEAD^1"], cwd=sandboxdir)
template = self.find_template("aquilon", "archetype", "base",
sandbox="changetest3")
with open(template) as f:
contents = f.readlines()
contents.append("#Added by prepchangetest3conflict\n")
with open(template, 'w') as f:
f.writelines(contents)
self.gitcommand(["commit", "-a", "-m",
"added prepchangetest3conflict comment"],
cwd=sandboxdir)
def test_125_publishchangetest3sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest3")
command = ["publish", "--branch=changetest3"]
# Ignore STDOUT messages explaining what will be pushed.
out = self.badrequesttest(command, ignoreout=True,
env=self.gitenv(), cwd=sandboxdir)
self.matchoutput(out, "rejected", command)
self.matchoutput(out, "non-fast-forward", command)
# Should this try to verify template-king's changetest3 branch?
# Can't check merge health on king because it's a bare repo.
# kingdir = self.config.get("broker", "kingdir")
# self.check_git_merge_health(kingdir)
def test_126_publishchangetest4sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest4")
self.successtest(["publish", "--branch", "changetest4"],
env=self.gitenv(), cwd=sandboxdir)
def test_127_deploychangetest4sandbox(self):
command = "deploy --source changetest4 --target changetarget"
self.successtest(command.split(" "))
repo = os.path.join(self.config.get("broker", "domainsdir"),
"changetarget")
self.check_git_merge_health(repo)
template = self.find_template("aquilon", "archetype", "base",
domain="changetarget-tracker")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by changetest4\n")
def test_130_add_changetest5_sandbox(self):
self.successtest(["add", "sandbox", "--sandbox", "changetest5"])
def test_131_prepare_changetest5(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest5")
filename = os.path.join(sandboxdir, "changetest5.txt")
with open(filename, "w") as f:
f.write("Added by changetest5\n")
self.gitcommand(["add", "changetest5.txt"], cwd=sandboxdir)
self.gitcommand(["commit", "-a", "-m", "added changetest5 comment"],
cwd=sandboxdir)
def test_132_publish_changetest5(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest5")
self.successtest(["publish", "--branch", "changetest5"],
env=self.gitenv(), cwd=sandboxdir)
def test_135_rollback_no_history(self):
command = ["rollback", "--domain", "changetarget-tracker",
"--ref", "changetest5"]
out = self.badrequesttest(command)
self.searchoutput(out, "Cannot roll back to commit: "
"branch changetarget does not contain", command)
def test_140_rollback(self):
command = "rollback --domain changetarget-tracker --lastsync"
self.successtest(command.split(" "))
template = self.find_template("aquilon", "archetype", "base",
domain="changetarget-tracker")
with open(template) as f:
contents = f.readlines()
self.assertNotEqual(contents[-1], "#Added by changetest4\n")
def test_145_failreverserollback(self):
command = "sync --domain changetarget-tracker"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out,
"Tracked branch changetarget is set to not "
"allow sync. Run aq validate",
command)
template = self.find_template("aquilon", "archetype", "base",
domain="changetarget-tracker")
with open(template) as f:
contents = f.readlines()
self.assertNotEqual(contents[-1], "#Added by changetest4\n")
def test_150_deploy_after_rollback(self):
command = ["deploy", "--source", "changetest5",
"--target", "changetarget"]
out = self.badrequesttest(command)
self.matchoutput(out, "Domain changetarget has not been validated",
command)
def test_151_deploy_after_rollback_nosync(self):
command = ["deploy", "--source", "changetest5",
"--target", "changetarget", "--nosync"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain "
"changetarget...", command)
self.matchclean(out, "changetarget-tracker", command)
def test_152_verify_tracker_directory(self):
domaindir = os.path.join(self.config.get("broker", "domainsdir"),
"changetarget-tracker")
filename = os.path.join(domaindir, "changetest5.txt")
self.assertFalse(os.path.exists(filename))
def test_160_validate(self):
command = "validate --branch changetarget"
self.commandtest(command.split(" "))
def test_165_reverserollback(self):
command = "sync --domain changetarget-tracker"
out = self.statustest(command.split(" "))
self.matchoutput(out, "Updating the checked out copy of domain "
"changetarget-tracker...", command)
template = self.find_template("aquilon", "archetype", "base",
domain="changetarget-tracker")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by changetest4\n")
domaindir = os.path.join(self.config.get("broker", "domainsdir"),
"changetarget-tracker")
filename = os.path.join(domaindir, "changetest5.txt")
self.assertTrue(os.path.exists(filename))
def test_200_rollback_bad_commit(self):
# This commit ID is from the Linux kernel sources
command = ["rollback", "--domain", "changetarget-tracker",
"--ref", "2dcd0af568b0cf583645c8a317dd12e344b1c72a"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Ref 2dcd0af568b0cf583645c8a317dd12e344b1c72a "
"could not be translated to an existing commit ID.",
command)
def test_200_faildeltrackedsandbox(self):
command = "del sandbox --sandbox changetest4"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out,
"Sandbox changetest4 is tracked by "
"['changetest4-tracker']",
command)
def test_800_delchangetest4tracker(self):
command = "del domain --domain changetest4-tracker"
self.noouttest(command.split(" "))
def test_805_verifydelchangetest4tracker(self):
command = "show domain --domain changetest4-tracker"
self.notfoundtest(command.split(" "))
def test_810_delchangetargettracker(self):
command = "del domain --domain changetarget-tracker"
self.noouttest(command.split(" "))
def test_815_verifydelchangetargettracker(self):
command = "show domain --domain changetarget-tracker"
self.notfoundtest(command.split(" "))
    # FIXME: should test del_sandbox on a sandbox with undeployed changes.
def test_820_delchangetest3sandbox(self):
command = "del sandbox --sandbox changetest3"
self.statustest(command.split(" "))
# This just deletes the branch, so the directory should still be there.
sandboxdir = os.path.join(self.sandboxdir, "changetest3")
self.assertTrue(os.path.exists(sandboxdir))
rmtree(sandboxdir)
def test_825_verifydelchangetest3sandbox(self):
command = "show sandbox --sandbox changetest3"
self.notfoundtest(command.split(" "))
def test_830_delchangetest4sandbox(self):
command = "del sandbox --sandbox changetest4"
self.statustest(command.split(" "))
# This just deletes the branch, so the directory should still be there.
sandboxdir = os.path.join(self.sandboxdir, "changetest4")
self.assertTrue(os.path.exists(sandboxdir))
rmtree(sandboxdir)
def test_840_archive_changetarget(self):
self.noouttest(["update_domain", "--domain=changetarget", "--archived"])
def test_845_del_changetarget(self):
command = "del domain --domain changetarget --justification=tcm=12345678"
self.noouttest(command.split(" "))
        self.assertFalse(os.path.exists(os.path.join(
            self.config.get("broker", "domainsdir"), "changetarget")))
def test_850_del_changetest5(self):
self.statustest(["del_sandbox", "--sandbox", "changetest5"])
sandboxdir = os.path.join(self.sandboxdir, "changetest5")
self.assertTrue(os.path.exists(sandboxdir))
rmtree(sandboxdir)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestMergeConflicts)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import os
import unittest
from datetime import datetime
from test.config import config
from pysmap import SmappCollection
class TestSmappCollection(unittest.TestCase):
def test_control(self):
self.assertTrue(True)
def test_smapp_bson_collection_iterates(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
self.assertTrue(len(list(collection)) > 0)
def test_smapp_json_collection_iterates(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
self.assertTrue(len(list(collection)) > 0)
def test_smapp_csv_collection_iterates(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['csv']['valid'])
collection = SmappCollection('csv', file_path)
self.assertTrue(len(list(collection)) > 0)
def test_limit_number_of_tweets(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
self.assertTrue(len(list(collection.limit_number_of_tweets(100))) > 0)
# def test_smapp_mongo_collection_iterates(self):
# collection = SmappCollection('mongo',
# config['mongo']['host'],
# config['mongo']['port'],
# config['mongo']['user'],
# config['mongo']['password'],
# config['mongo']['database'],
# config['mongo']['collection'])
# self.assertTrue(len(list(collection.limit_number_of_tweets(100))) > 0)
def test_get_tweet_texts(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
texts = [text for text in collection.limit_number_of_tweets(1).get_tweet_texts()]
self.assertEqual(str, type(texts[0]))
def test_count_tweet_terms(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = collection.count_tweet_terms('jade')
self.assertEqual(167, count)
def test_count_tweet_terms_multiple(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = collection.count_tweet_terms('jade', 'helm')
self.assertEqual(176, count)
def test_count_tweets(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = collection.count_tweets()
self.assertEqual(1187, count)
def test_get_tweets_containing(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_tweets_containing('jade')])
self.assertEqual(167, count)
def test_get_tweets_containing_multiple(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_tweets_containing('jade', 'helm')])
self.assertEqual(176, count)
def test_get_date_range(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_date_range(datetime(2015,11,2), datetime(2015,11,3))])
self.assertEqual(26, count)
def test_find_date_range(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
range_obj = collection.find_date_range()
self.assertEqual(datetime(2015, 11, 2, 19, 56, 33), range_obj['date_min'])
self.assertEqual(datetime(2015, 11, 6, 21, 35, 54), range_obj['date_max'])
def test_tweet_language_is(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.tweet_language_is('en')])
self.assertEqual(825, count)
def test_detect_tweet_language(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.detect_tweet_language('en')])
self.assertEqual(907, count)
def test_user_language_is(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.user_language_is('en')])
self.assertEqual(801, count)
def test_exclude_retweets(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.exclude_retweets()])
self.assertEqual(682, count)
def test_get_retweets(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_retweets()])
self.assertEqual(505, count)
def test_user_location_contains(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.user_location_contains('TX')])
self.assertEqual(10, count)
def test_user_description_contains(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
count = len([tweet for tweet in collection.user_description_contains('JESUS')])
self.assertEqual(15, count)
def test_user_id_is(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
count = len([tweet for tweet in collection.user_id_is(379851447, 149751818)])
self.assertEqual(77, count)
def test_place_name_contains_country(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
count = len([tweet for tweet in collection.place_name_contains_country('United States')])
self.assertEqual(6, count)
def test_within_geobox(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
        # The geobox below covers US Mountain Time.
        # A coordinate inside it was added to the last object in the data file: [-105.29, 40.33].
        # Another coordinate, outside US Mountain Time, was added to the JSON: [-123.007053, 44.824997].
count = len([tweet for tweet in collection.within_geobox(-113.95, 28.81, -100.05, 48.87)])
self.assertEqual(1, count)
def test_get_geo_enabled(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_geo_enabled()])
self.assertEqual(1, count)
def test_get_non_geo_enabled(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
count = len([tweet for tweet in collection.get_non_geo_enabled()])
self.assertEqual(1186, count)
def test_dump_to_bson(self):
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson')
output_path = os.path.dirname(os.path.realpath(__file__)) + '/' + 'data/output.bson'
collection = SmappCollection('bson', os.path.dirname(os.path.realpath(__file__)) +'/'+ config['bson']['valid'])
collection.dump_to_bson(output_path)
self.assertTrue(os.path.getsize(output_path) > 0)
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson')
def test_dump_to_json(self):
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json')
output_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)),'data/output.bson.json')
collection = SmappCollection('bson', os.path.dirname(os.path.realpath(__file__)) +'/'+ config['bson']['valid'])
collection.dump_to_json(output_path)
self.assertTrue(os.path.getsize(output_path) > 0)
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json')
def test_dump_to_csv(self):
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.csv'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.csv')
output_path = os.path.dirname(os.path.realpath(__file__)) + '/' + 'data/output.csv'
collection = SmappCollection('bson', os.path.dirname(os.path.realpath(__file__)) +'/'+ config['bson']['valid'])
collection.dump_to_csv(output_path, ['id_str', 'entities.hashtags.0', 'entities.hashtags.1'])
self.assertTrue(os.path.getsize(output_path) > 0)
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.csv'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.csv')
def test_dump_to_sqlite_db(self):
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.db'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.db')
output_path = os.path.dirname(os.path.realpath(__file__)) + '/' + 'data/output.db'
collection = SmappCollection('bson', os.path.dirname(os.path.realpath(__file__)) +'/'+ config['bson']['valid'])
collection.dump_to_sqlite_db(output_path, ['id_str', 'entities.hashtags.0', 'entities.hashtags.1'])
self.assertTrue(os.path.getsize(output_path) > 0)
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.db'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.db')
def test_get_top_hashtags(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
base_hashtags = {'hashtags': {'2a': 26, 'pjnet': 26, 'jadehelm': 111, 'falseflag': 32, 'JadeHelm': 118}}
hashtags = collection.get_top_hashtags(5)
self.assertTrue(set(hashtags.keys()) == set(base_hashtags.keys()))
def test_get_top_urls(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
urls = collection.get_top_urls(5)
base_urls = {'urls': {'https://t.co/ATzXpRciyr': 18, 'https://t.co/dpz7vZ1JWy': 39, 'https://t.co/l9OEuvRlt8': 24, 'https://t.co/nkc4hnukLX': 21, 'https://t.co/rsNUItS48U': 60}}
self.assertTrue(set(urls.keys()) == set(base_urls.keys()))
def test_get_top_mentions(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
top_mentions = collection.get_top_mentions(5)
base_top_mentions = {'user_mentions': {'233498836': 58, '27234909': 56, '10228272': 75, '1619936671': 41, '733417892': 121}}
self.assertTrue(set(top_mentions.keys()) == set(base_top_mentions.keys()))
def test_get_top_media(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
top_media = collection.get_top_media(5)
base_top_media = {'media': {'https://t.co/pAfigDPcNc': 27, 'https://t.co/MaOGn6wH40': 17, 'https://t.co/TH8TmGuYww': 24, 'https://t.co/YpqDPqA2UO': 14, 'https://t.co/ORaTXOM2oX': 55}}
self.assertTrue(set(top_media.keys()) == set(base_top_media.keys()))
def test_get_top_symbols(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
top_symbols = collection.get_top_symbols(5)
base_top_symbols = {'symbols': {0: None, 'hould': 1, 2: None, 3: None, 1: None}}
self.assertTrue(set(top_symbols.keys()) == set(base_top_symbols.keys()))
def test_get_top_terms(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
top_counts = collection.get_top_terms(10)
base_top_counts = {'Jade': 538, 'Duty:': 146, 'Ops': 265, 'Sevenfold': 216, 'III': 173, 'RT': 524, 'Black': 235, 'Helm': 415, 'Avenged': 220, '-': 193}
self.assertTrue(set(top_counts.keys()) == set(base_top_counts.keys()))
def test_base_top_entities_returns_dict(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
returndict = collection.get_top_entities({'hashtags':5})
self.assertTrue(isinstance(returndict, dict))
def test_base_top_entities_returns_hashtags(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
returndict = collection.get_top_entities({'hashtags':5})
self.assertTrue('hashtags' in returndict)
def test_base_top_entities_returns_hashtags_and_media(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
returndict = collection.get_top_entities({'user_mentions':5, 'media':3})
self.assertTrue('user_mentions' in returndict and 'media' in returndict)
def test_base_top_entities_returns_counts(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
returndict = collection.get_top_entities({'urls':5, 'symbols':3})
if len(returndict['urls']) > 0:
self.assertTrue(len(returndict['urls']) == 5)
if len(returndict['symbols']) > 0:
self.assertTrue(len(returndict['symbols']) == 3)
def test_sample_returns_right_number_of_items(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection = SmappCollection('bson', file_path)
sample_collection = collection.sample(10)
self.assertEqual(10, len(list(sample_collection)))
    def test_sample_returns_different_tweets_than_first_10_tweets(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection_one = SmappCollection('bson', file_path)
sample_tweets = list(collection_one.sample(10))
collection_two = SmappCollection('bson', file_path)
first_ten_tweets = list(collection_two.limit_number_of_tweets(10))
self.assertNotEqual(sample_tweets, first_ten_tweets)
def test_sample_chains_and_dumps(self):
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json')
output_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)),'data/output.bson.json')
collection = SmappCollection('bson', os.path.dirname(os.path.realpath(__file__)) +'/'+ config['bson']['valid'])
sample_tweets = collection.sample(10)
sample_tweets.dump_to_json(output_path)
self.assertTrue(os.path.getsize(output_path) > 0)
with open(output_path) as f:
self.assertEqual(10, len([line for line in f]))
if os.path.exists(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json'):
os.remove(os.path.dirname(os.path.abspath(__file__))+'/data/output.bson.json')
def test_set_custom_filter_properly_filters(self):
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['bson']['valid'])
collection_one = SmappCollection('bson', file_path)
full_collection_len = len(list(collection_one))
def is_tweet_a_retweet(tweet):
if 'retweeted' in tweet and tweet['retweeted']:
return True
else:
return False
num_retweets = len(list(collection_one.set_custom_filter(is_tweet_a_retweet)))
collection_two = SmappCollection('bson', file_path)
def is_not_a_retweet(tweet):
if 'retweeted' in tweet and tweet['retweeted']:
return False
else:
return True
num_non_retweets = len(list(collection_two.set_custom_filter(is_not_a_retweet)))
self.assertEqual(num_retweets + num_non_retweets, full_collection_len)
if __name__ == '__main__':
unittest.main()
|
|
"""
Package WAGL HDF5 Outputs
This converts the HDF5 file (and sibling fmask/gqa files) into
GeoTIFFs (COGs) with datacube metadata using the DEA naming conventions
for files.
"""
import contextlib
import os
import re
import sys
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple
from uuid import UUID
import attr
import numpy
import rasterio
from affine import Affine
from boltons.iterutils import PathAccessError, get_path
from click import secho
from rasterio import DatasetReader
from rasterio.crs import CRS
from rasterio.enums import Resampling
from eodatasets3 import DatasetAssembler, images, serialise, utils
from eodatasets3.images import GridSpec
from eodatasets3.model import DatasetDoc
from eodatasets3.properties import Eo3Interface
from eodatasets3.serialise import loads_yaml
from eodatasets3.ui import bool_style
from eodatasets3.utils import default_utc, flatten_dict
try:
import h5py
except ImportError:
sys.stderr.write(
"eodatasets3 has not been installed with the wagl extras. \n"
" Try `pip install eodatasets3[wagl]\n"
)
raise
POSSIBLE_PRODUCTS = ("nbar", "nbart", "lambertian", "sbt")
DEFAULT_PRODUCTS = ("nbar", "nbart")
_THUMBNAILS = {
"nbar": ("nbar:red", "nbar:green", "nbar:blue"),
"nbart": ("nbart:red", "nbart:green", "nbart:blue"),
}
os.environ["CPL_ZIP_ENCODING"] = "UTF-8"
FILENAME_TIF_BAND = re.compile(
r"(?P<prefix>(?:.*_)?)(?P<band_name>B[0-9][A0-9]|B[0-9]*|B[0-9a-zA-z]*)"
r"(?P<extension>\....)"
)
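# For example (the filename is illustrative), FILENAME_TIF_BAND splits
# 'LC80920842016180LGN01_B6.TIF' into prefix='LC80920842016180LGN01_',
# band_name='B6' and extension='.TIF'.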
PRODUCT_SUITE_FROM_GRANULE = re.compile("(L1[GTPCS]{1,2})")
class ProductMaturity(Enum):
provisional = "provisional"
stable = "stable"
def _find_h5_paths(h5_obj: h5py.Group, dataset_class: str = "") -> List[str]:
"""
    Find all objects in an HDF5 file of the given class, returning their paths.
    (Class examples: IMAGE, TABLE, SCALAR.)
"""
items = []
def _find(name, obj):
if obj.attrs.get("CLASS") == dataset_class:
items.append(name)
h5_obj.visititems(_find)
return items
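# Sketch of the visitor above: h5py calls _find(name, obj) once for every
# group/dataset below h5_obj, so _find_h5_paths(fid, "IMAGE") returns the
# paths of all datasets whose CLASS attribute equals 'IMAGE'
# (e.g. something like 'RES-GROUP-1/.../NBAR/BAND-2'; the exact layout
# depends on the wagl output).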
def _unpack_products(
p: DatasetAssembler, product_list: Iterable[str], h5group: h5py.Group
) -> None:
"""
Unpack and package the NBAR and NBART products.
"""
# listing of all datasets of IMAGE CLASS type
img_paths = _find_h5_paths(h5group, "IMAGE")
for product in product_list:
with sub_product(product, p):
for pathname in [p for p in img_paths if f"/{product.upper()}/" in p]:
with do(f"Path {pathname!r}"):
dataset = h5group[pathname]
band_name = utils.normalise_band_name(dataset.attrs["alias"])
write_measurement_h5(
p,
f"{product}:{band_name}",
dataset,
overview_resampling=Resampling.average,
file_id=_file_id(dataset),
)
if product in _THUMBNAILS:
red, green, blue = _THUMBNAILS[product]
with do(f"Thumbnailing {product}"):
p.write_thumbnail(
red,
green,
blue,
static_stretch=(1, 3000),
# Because of our strange sub-products and filename standards, we want the
# 'kind' to be included in the recorded thumbnail accessory metadata,
# but not in the filename.
# So we manually calculate a filename without the 'kind' field included.
kind=product,
path=p.names.thumbnail_filename(),
)
def write_measurement_h5(
p: DatasetAssembler,
full_name: str,
g: h5py.Dataset,
overviews=images.DEFAULT_OVERVIEWS,
overview_resampling=Resampling.nearest,
expand_valid_data=True,
file_id: str = None,
):
"""
Write a measurement by copying it from a hdf5 dataset.
"""
if hasattr(g, "chunks"):
data = g[:]
else:
data = g
product_name, band_name = full_name.split(":")
p.write_measurement_numpy(
array=data,
grid_spec=images.GridSpec(
shape=g.shape,
transform=Affine.from_gdal(*g.attrs["geotransform"]),
crs=CRS.from_wkt(g.attrs["crs_wkt"]),
),
nodata=g.attrs.get("no_data_value"),
overviews=overviews,
overview_resampling=overview_resampling,
expand_valid_data=expand_valid_data,
file_id=file_id,
# Because of our strange sub-products and filename standards, we want the
# product_name to be included in the recorded band metadata,
# but not in its filename.
# So we manually calculate a filename without the extra product name prefix.
name=full_name,
path=p.names.measurement_filename(band_name, "tif", file_id=file_id),
)
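# Affine.from_gdal (used above) takes the six GDAL geotransform values
# (x_origin, pixel_width, row_rotation, y_origin, column_rotation,
# pixel_height). For instance, Affine.from_gdal(600000.0, 30.0, 0.0,
# 7000000.0, 0.0, -30.0) maps pixel (col, row) to map (x, y) on a north-up
# 30 m grid whose top-left corner is (600000, 7000000); the numbers here
# are illustrative.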
def _file_id(dataset: h5py.Dataset) -> str:
"""
Devise a file id for the given dataset (using its attributes)
Eg. 'band01'
"""
# What we have to work with:
# >>> print(repr((dataset.attrs["band_id"], dataset.attrs["band_name"], dataset.attrs["alias"])))
# ('1', 'BAND-1', 'Blue')
band_name = dataset.attrs["band_id"]
# A purely numeric id needs to be formatted 'band01' according to naming conventions.
return utils.normalise_band_name(band_name)
def _unpack_observation_attributes(
p: DatasetAssembler,
product_list: Iterable[str],
h5group: h5py.Group,
infer_datetime_range=False,
oa_resolution: Optional[Tuple[float, float]] = None,
):
"""
Unpack the angles + other supplementary datasets produced by wagl.
Currently only the mode resolution group gets extracted.
"""
resolution_groups = {
tuple(h5group[k].attrs["resolution"]): h5group[k]
for k in h5group.keys()
if k.startswith("RES-GROUP-")
}
# Use the highest resolution as the ground sample distance.
if "eo:gsd" in p.properties:
del p.properties["eo:gsd"]
p.properties["eo:gsd"] = min(min(resolution_groups.keys()))
res_grp = choose_resolution_group(resolution_groups, p.platform, oa_resolution)
def _write(section: str, dataset_names: Sequence[str]):
"""
        Write supplementary attributes as measurements.
"""
for dataset_name in dataset_names:
o = f"{section}/{dataset_name}"
with do(f"Path {o!r} "):
measurement_name = utils.normalise_band_name(dataset_name)
write_measurement_h5(
p,
f"oa:{measurement_name}",
res_grp[o],
# We only use the product bands for valid data calc, not supplementary.
# According to Josh: Supplementary pixels outside of the product bounds are implicitly invalid.
expand_valid_data=False,
overviews=None,
)
_write(
"SATELLITE-SOLAR",
[
"SATELLITE-VIEW",
"SATELLITE-AZIMUTH",
"SOLAR-ZENITH",
"SOLAR-AZIMUTH",
"RELATIVE-AZIMUTH",
"TIME-DELTA",
],
)
_write("INCIDENT-ANGLES", ["INCIDENT-ANGLE", "AZIMUTHAL-INCIDENT"])
_write("EXITING-ANGLES", ["EXITING-ANGLE", "AZIMUTHAL-EXITING"])
_write("RELATIVE-SLOPE", ["RELATIVE-SLOPE"])
_write("SHADOW-MASKS", ["COMBINED-TERRAIN-SHADOW"])
timedelta_data = (
res_grp["SATELLITE-SOLAR/TIME-DELTA"] if infer_datetime_range else None
)
with do("Contiguity", timedelta=bool(timedelta_data)):
_create_contiguity(
p,
product_list,
resolution_yx=tuple(res_grp.attrs["resolution"]),
timedelta_data=timedelta_data,
)
def choose_resolution_group(
resolution_groups: Dict[tuple, h5py.Group],
platform: str,
oa_resolution: Optional[Tuple[float, float]],
) -> h5py.Group:
# None specified? Figure out a default.
if oa_resolution is None:
# For Landsat, we only cared about packaging OA data for the "common"
# bands (not panchromatic). So we always pick the higher resolution.
if platform.startswith("landsat"):
oa_resolution = max(resolution_groups.keys())
elif platform.startswith("sentinel"):
oa_resolution = (10.0, 10.0)
else:
raise NotImplementedError(
f"Don't know how to choose a default OA resolution for platform {platform !r}"
)
res_grp = resolution_groups.get(oa_resolution)
if res_grp is None:
raise RuntimeError(
f"Resolution {oa_resolution} not found in input. "
f"Have resolutions {tuple(resolution_groups.keys())}"
)
return res_grp
def _create_contiguity(
p: DatasetAssembler,
product_list: Iterable[str],
resolution_yx: Tuple[float, float],
timedelta_product: str = "nbar",
timedelta_data: numpy.ndarray = None,
):
"""
Create the contiguity (all pixels valid) dataset.
Write a contiguity mask file based on the intersection of valid data pixels across all
bands from the input files.
"""
for product in product_list:
contiguity = None
for grid, band_name, path in p.iter_measurement_paths():
if not band_name.startswith(f"{product.lower()}:"):
continue
# Only our given res group (no pan band in Landsat)
if grid.resolution_yx != resolution_yx:
continue
with rasterio.open(path) as ds:
ds: DatasetReader
if contiguity is None:
contiguity = numpy.ones((ds.height, ds.width), dtype="uint8")
geobox = GridSpec.from_rio(ds)
elif ds.shape != contiguity.shape:
raise NotImplementedError(
"Contiguity from measurements of different shape"
)
for band in ds.indexes:
contiguity &= ds.read(band) > 0
if contiguity is None:
secho(f"No images found for requested product {product}", fg="red")
continue
p.write_measurement_numpy(
f"oa:{product.lower()}_contiguity",
contiguity,
geobox,
nodata=255,
overviews=None,
expand_valid_data=False,
# Because of our strange sub-products and filename standards, we want the
# 'oa_' prefix to be included in the recorded band metadata,
# but not in its filename.
# So we manually calculate a filename without the extra prefix.
path=p.names.measurement_filename(f"{product.lower()}_contiguity"),
)
        # Mask timedelta_data with the contiguity mask to get the min and max
        # time delta within the NBAR product footprint for Landsat sensors.
        # For Sentinel sensors, the range is inherited from the level-1 YAML file.
if timedelta_data is not None and product.lower() == timedelta_product:
valid_timedelta_data = numpy.ma.masked_where(
contiguity == 0, timedelta_data
)
def offset_from_center(v: numpy.datetime64):
return p.datetime + timedelta(
microseconds=v.astype(float) * 1_000_000.0
)
p.datetime_range = (
offset_from_center(numpy.ma.min(valid_timedelta_data)),
offset_from_center(numpy.ma.max(valid_timedelta_data)),
)
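# A minimal numpy sketch of the intersection built above: a pixel stays
# contiguous only while every band of every matching measurement is
# non-zero there.
#
#   import numpy as np
#   band1 = np.array([[1, 0], [5, 2]])
#   band2 = np.array([[3, 4], [0, 2]])
#   contiguity = np.ones(band1.shape, dtype="uint8")
#   contiguity &= band1 > 0    # [[1, 0], [1, 1]]
#   contiguity &= band2 > 0    # [[1, 0], [0, 1]]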
@contextlib.contextmanager
def do(name: str, heading=False, **fields):
"""
Informational logging.
TODO: move this to the cli. It shouldn't be part of library usage.
"""
single_line = not heading
def val(v: Any):
if isinstance(v, bool):
return bool_style(v)
if isinstance(v, Path):
return repr(str(v))
return repr(v)
if heading:
name = f"\n{name}"
fields = " ".join(f"{k}:{val(v)}" for k, v in fields.items())
secho(f"{name} {fields} ", nl=not single_line, fg="blue" if heading else None)
yield
if single_line:
secho("(done)")
@contextlib.contextmanager
def sub_product(name: str, p: Eo3Interface):
"""
Set the product family temporarily within a block of code.
This is done for sub-products that WAGL contains, which have
a different 'family' in their filenames.
"""
with do(f"Product {name}", heading=True):
original_family = p.product_family
        # We delete first to show that we're deliberately changing the value
        # (avoids the 'overriding property' warning).
del p.product_family
p.product_family = name
yield
del p.product_family
p.product_family = original_family
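# Usage sketch: inside the block the assembler names files with the
# sub-product family, and the original family is restored afterwards.
#
#   with sub_product("nbar", p):
#       ...  # p.product_family == "nbar" here
#   # p.product_family is back to its original value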
def _extract_reference_code(p: DatasetAssembler, granule: str) -> Optional[str]:
matches = None
if p.platform.startswith("landsat"):
matches = re.match(r"L\w\d(?P<reference_code>\d{6}).*", granule)
elif p.platform.startswith("sentinel-2"):
matches = re.match(r".*_T(?P<reference_code>\d{1,2}[A-Z]{3})_.*", granule)
if matches:
[reference_code] = matches.groups()
# TODO name properly
return reference_code
return None
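# A small, non-production sketch showing what the patterns in _extract_reference_code pull
# out of the two granule-name styles (both example names are taken from the doctests in this
# module): Landsat yields the six-digit path/row, Sentinel-2 yields the MGRS tile code.
def _example_reference_codes() -> Tuple[str, str]:
    landsat = re.match(r"L\w\d(?P<reference_code>\d{6}).*", "LT50910841993188ASA00")
    sentinel = re.match(
        r".*_T(?P<reference_code>\d{1,2}[A-Z]{3})_.*",
        "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09",
    )
    return landsat.group("reference_code"), sentinel.group("reference_code")  # ("091084", "53JQJ")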
@attr.s(auto_attribs=True)
class Granule:
"""
A single granule in a hdf5 file, with optional corresponding fmask/gqa/etc files.
You probably want to make one by using `Granule.for_path()`
"""
name: str
wagl_hdf5: Path
wagl_metadata: Dict
source_level1_metadata: Optional[DatasetDoc]
fmask_doc: Optional[Dict] = None
fmask_image: Optional[Path] = None
gqa_doc: Optional[Dict] = None
tesp_doc: Optional[Dict] = None
@classmethod
def for_path(
cls,
wagl_hdf5: Path,
granule_names: Optional[Sequence[str]] = None,
level1_metadata_path: Optional[Path] = None,
fmask_image_path: Optional[Path] = None,
fmask_doc_path: Optional[Path] = None,
gqa_doc_path: Optional[Path] = None,
tesp_doc_path: Optional[Path] = None,
allow_missing_provenance: bool = False,
):
"""
Create granules by scanning the given hdf5 file.
Optionally specify additional files and level1 path.
        If they are not specified, it will look for them using WAGL's output naming conventions.
:param allow_missing_provenance:
"""
if not wagl_hdf5.exists():
raise ValueError(f"Input hdf5 doesn't exist {wagl_hdf5}")
with h5py.File(wagl_hdf5, "r") as fid:
granule_names = granule_names or fid.keys()
for granule_name in granule_names:
if granule_name not in fid:
raise ValueError(
f"Granule {granule_name!r} not found in file {wagl_hdf5}"
)
wagl_doc_field = get_path(fid, (granule_name, "METADATA", "CURRENT"))
if not wagl_doc_field:
raise ValueError(
f"Granule contains no wagl metadata: {granule_name} in {wagl_hdf5}"
)
[wagl_doc] = loads_yaml(wagl_doc_field[()])
level1 = _load_level1_doc(
wagl_doc, level1_metadata_path, allow_missing_provenance
)
fmask_image_path = fmask_image_path or wagl_hdf5.with_name(
f"{granule_name}.fmask.img"
)
if not fmask_image_path.exists():
raise ValueError(f"No fmask image found at {fmask_image_path}")
fmask_doc_path = fmask_doc_path or fmask_image_path.with_suffix(".yaml")
if not fmask_doc_path.exists():
raise ValueError(f"No fmask found at {fmask_doc_path}")
with fmask_doc_path.open("r") as fl:
[fmask_doc] = loads_yaml(fl)
gqa_doc_path = gqa_doc_path or wagl_hdf5.with_name(
f"{granule_name}.gqa.yaml"
)
if not gqa_doc_path.exists():
raise ValueError(f"No gqa found at {gqa_doc_path}")
with gqa_doc_path.open("r") as fl:
[gqa_doc] = loads_yaml(fl)
# Optional doc
if tesp_doc_path:
# But if they gave us a path, we're strict about it existing.
if not tesp_doc_path.exists():
raise ValueError(
f"Supplied tesp doc path doesn't exist: {tesp_doc_path}"
)
else:
tesp_doc_path = wagl_hdf5.with_name(f"{granule_name}.tesp.yaml")
if tesp_doc_path.exists():
with tesp_doc_path.open("r") as fl:
[tesp_doc] = loads_yaml(fl)
yield cls(
name=granule_name,
wagl_hdf5=wagl_hdf5,
wagl_metadata=wagl_doc,
source_level1_metadata=level1,
fmask_doc=fmask_doc,
fmask_image=fmask_image_path,
gqa_doc=gqa_doc,
tesp_doc=tesp_doc,
)
def _load_level1_doc(
wagl_doc: Dict,
user_specified_l1_path: Optional[Path] = None,
allow_missing_provenance=False,
):
if user_specified_l1_path:
if not user_specified_l1_path.exists():
raise ValueError(
f"No level1 metadata found at given path {user_specified_l1_path}"
)
level1_path = user_specified_l1_path
else:
level1_path = Path(get_path(wagl_doc, ("source_datasets", "source_level1")))
# If a directory, assume "<dirname>.odc-metadata.yaml"
if level1_path.is_dir():
metadata_path = level1_path / (level1_path.name + ".odc-metadata.yaml")
# Otherwise it's a sibling file with ".odc-metadata.yaml" suffix
else:
if level1_path.suffix.lower() == ".yaml":
metadata_path = level1_path
else:
metadata_path = level1_path.with_suffix(".odc-metadata.yaml")
if not metadata_path.exists():
if not allow_missing_provenance:
raise ValueError(
"No level1 found or provided. "
f"WAGL said it was at path {str(level1_path)!r}. "
"Which has no metadata doc we can find, and you didn't specify an alternative. "
f"(allow_missing_provenance={allow_missing_provenance})"
)
return None
return serialise.from_path(metadata_path)
def package_file(
out_directory: Path,
hdf_file: Path,
included_products: Iterable[str] = DEFAULT_PRODUCTS,
include_oa: bool = True,
) -> Dict[UUID, Path]:
"""
Simple alternative to package().
Takes a single HDF5 and infers other paths (gqa etc) via naming conventions.
    Returns a dictionary of the output datasets: mapping UUID to their metadata path.
"""
out = {}
for granule in Granule.for_path(hdf_file):
dataset_id, metadata_path = package(
out_directory,
granule,
included_products=included_products,
include_oa=include_oa,
)
out[dataset_id] = metadata_path
return out
def package(
out_directory: Path,
granule: Granule,
*,
product_maturity: ProductMaturity = ProductMaturity.stable,
included_products: Iterable[str] = DEFAULT_PRODUCTS,
include_oa: bool = True,
oa_resolution: Optional[Tuple[float, float]] = None,
) -> Tuple[UUID, Path]:
"""
Package an L2 product.
:param include_oa:
:param out_directory:
The base directory for output datasets. A DEA-naming-conventions folder hierarchy
will be created inside this folder.
:param granule:
        Granule information. You probably want to make one with Granule.for_path()
:param included_products:
A list of imagery products to include in the package.
Defaults to all products.
:return:
The dataset UUID and output metadata path
"""
included_products = tuple(s.lower() for s in included_products)
with h5py.File(granule.wagl_hdf5, "r") as fid:
granule_group = fid[granule.name]
wagl_doc = _read_wagl_metadata(granule_group)
with DatasetAssembler(
out_directory.absolute(),
# WAGL stamps a good, random ID already.
dataset_id=granule.wagl_metadata.get("id"),
naming_conventions="dea_s2"
if ("sentinel" in wagl_doc["source_datasets"]["platform_id"].lower())
else "dea",
) as p:
_apply_wagl_metadata(p, wagl_doc)
# It's a GA ARD product.
p.producer = "ga.gov.au"
p.product_family = "ard"
p.maturity = _determine_maturity(
acq_date=p.datetime,
processed=p.processed,
wagl_doc=wagl_doc,
)
# We don't bother including product maturity if it's stable, for consistency with old datasets.
# Stable is the assumed default.
if product_maturity is not ProductMaturity.stable:
p.product_maturity = product_maturity
if granule.source_level1_metadata is not None:
# For historical consistency: we want to use the instrument that the source L1 product
# came from, not the instruments reported from the WAGL doc.
#
# Eg.
# Level 1 will say "OLI_TIRS", while wagl doc will say "OLI".
# Our current C3 products say "OLI_TIRS" so we need to stay consistent.
# (even though WAGL only *used* the OLI bands, it came from an OLI_TIRS observation)
#
# So delete our current wagl one, since we're adding a source dataset:
if p.instrument is not None:
del p.properties["eo:instrument"]
p.add_source_dataset(
granule.source_level1_metadata, auto_inherit_properties=True
)
# When level 1 is NRT, ARD is always NRT.
if granule.source_level1_metadata.maturity == "nrt":
p.maturity = "nrt"
org_collection_number = utils.get_collection_number(
p.platform, p.producer, p.properties.get("landsat:collection_number")
)
p.dataset_version = f"{org_collection_number}.2.1"
p.region_code = _extract_reference_code(p, granule.name)
_read_gqa_doc(p, granule.gqa_doc)
_read_fmask_doc(p, granule.fmask_doc)
if granule.tesp_doc:
_take_software_versions(p, granule.tesp_doc)
_unpack_products(p, included_products, granule_group)
if include_oa:
with sub_product("oa", p):
with do("Starting OA", heading=True):
_unpack_observation_attributes(
p,
included_products,
granule_group,
infer_datetime_range=p.platform.startswith("landsat"),
oa_resolution=oa_resolution,
)
if granule.fmask_image:
with do(f"Writing fmask from {granule.fmask_image} "):
p.write_measurement(
"oa:fmask",
granule.fmask_image,
expand_valid_data=False,
overview_resampling=Resampling.mode,
# Because of our strange sub-products and filename standards, we want the
# 'oa_' prefix to be included in the recorded band metadata,
# but not in its filename.
# So we manually calculate a filename without the extra prefix.
path=p.names.measurement_filename("fmask"),
)
with do("Finishing package"):
return p.done()
def _read_gqa_doc(p: DatasetAssembler, doc: Dict):
_take_software_versions(p, doc)
p.extend_user_metadata("gqa", doc)
# TODO: more of the GQA fields?
for k, v in flatten_dict(doc["residual"], separator="_"):
p.properties[f"gqa:{k}"] = v
def _read_fmask_doc(p: DatasetAssembler, doc: Dict):
for name, value in doc["percent_class_distribution"].items():
# From Josh: fmask cloud cover trumps the L1 cloud cover.
if name == "cloud":
if "eo:cloud_cover" in p.properties:
del p.properties["eo:cloud_cover"]
p.properties["eo:cloud_cover"] = value
p.properties[f"fmask:{name}"] = value
_take_software_versions(p, doc)
p.extend_user_metadata("fmask", doc)
def _take_software_versions(p: DatasetAssembler, doc: Dict):
versions = doc.pop("software_versions", {})
for name, o in versions.items():
p.note_software_version(name, o.get("repo_url"), o.get("version"))
def find_a_granule_name(wagl_hdf5: Path) -> str:
"""
    Try to extract the granule name from a wagl filename.
>>> find_a_granule_name(Path('LT50910841993188ASA00.wagl.h5'))
'LT50910841993188ASA00'
>>> find_a_granule_name(Path('S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09.wagl.h5'))
'S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09'
>>> find_a_granule_name(Path('my-test-granule.h5'))
Traceback (most recent call last):
...
ValueError: Does not appear to be a wagl output filename? 'my-test-granule.h5'.
"""
if not wagl_hdf5.name.endswith(".wagl.h5"):
raise ValueError(
f"Does not appear to be a wagl output filename? {wagl_hdf5.name!r}."
)
return wagl_hdf5.name[: -len(".wagl.h5")]
def _read_wagl_metadata(granule_group: h5py.Group):
try:
wagl_path, *ancil_paths = (
pth for pth in _find_h5_paths(granule_group, "SCALAR") if "METADATA" in pth
)
except ValueError:
raise ValueError("No nbar metadata found in granule")
[wagl_doc] = loads_yaml(granule_group[wagl_path][()])
for i, path in enumerate(ancil_paths, start=2):
wagl_doc.setdefault(f"wagl_{i}", {}).update(
list(loads_yaml(granule_group[path][()]))[0]["ancillary"]
)
return wagl_doc
def _apply_wagl_metadata(p: DatasetAssembler, wagl_doc: Dict):
source = wagl_doc["source_datasets"]
p.datetime = source["acquisition_datetime"]
p.platform = source["platform_id"]
p.instrument = source["sensor_id"]
try:
p.processed = get_path(wagl_doc, ("system_information", "time_processed"))
except PathAccessError:
raise RuntimeError("WAGL dataset contains no processed time.")
_take_software_versions(p, wagl_doc)
p.extend_user_metadata("wagl", wagl_doc)
def _determine_maturity(acq_date: datetime, processed: datetime, wagl_doc: Dict):
"""
Determine maturity field of a dataset.
Based on the fallback logic in nbar pages of CMI, eg: https://cmi.ga.gov.au/ga_ls5t_nbart_3
"""
ancillary_tiers = {
key.lower(): o["tier"]
for key, o in wagl_doc["ancillary"].items()
if "tier" in o
}
if "water_vapour" not in ancillary_tiers:
# Perhaps this should be a warning, but I'm being strict until told otherwise.
# (a warning is easy to ignore)
raise ValueError(
f"No water vapour ancillary tier. Got {list(ancillary_tiers.keys())!r}"
)
water_vapour_is_definitive = ancillary_tiers["water_vapour"].lower() == "definitive"
if (processed - acq_date) < timedelta(hours=48):
return "nrt"
if not water_vapour_is_definitive:
return "interim"
# For accurate BRDF, both Aqua and Terra need to be operating.
# Aqua launched May 2002, and we add a ~2 month buffer of operation.
if acq_date < default_utc(datetime(2002, 7, 1)):
return "final"
if "brdf" not in ancillary_tiers:
# Perhaps this should be a warning, but I'm being strict until told otherwise.
# (a warning is easy to ignore)
raise ValueError(
f"No brdf tier available. Got {list(ancillary_tiers.keys())!r}"
)
brdf_tier = ancillary_tiers["brdf"].lower()
if "definitive" in brdf_tier:
return "final"
elif "fallback" in brdf_tier:
return "interim"
else:
# This value should not occur for production data, only for experiments
return "user"
|
|
import numpy
from residuals import Robust
import result as pod_result
try:
    from scipy.optimize import leastsq
except ImportError:
    print('Scipy is not installed')
try:
    from lmfit import minimize, Parameters
except ImportError:
    print('lmfit-py is not installed')
class Algorithm(object):
def __init__(self):
pass
def setName(self, name):
self.name = name
def setDataSets(self, data_sets):
self.data_sets = data_sets
def setFitter(self, fitter):
self.fitter = fitter
def objFunc(self, p):
res = numpy.array([])
for i in self.data_sets:
res = numpy.hstack((res, i.getWeightedResiduals(p)))
return res
def solve(self, parameters):
self.parameters = parameters
self.parameter_names = [i.name for i in parameters]
result = self.optimize(parameters)
return self.createResultsObject(result)
def optimize(self, parameters):
assert False, 'The algorithm does not implement the method: optimize'
def createResultsObject(self, result):
assert False, 'The algorithm does not implement the method: createResultsObject'
def _calcNumberOfObservations(self):
n = 0
for data_set in self.data_sets:
n += len(data_set.v)
return n
def _calcDegOfFreedomT(self, n):
return n - 1.0
def _calcDegOfFreedomE(self, n, p):
return n - p - 1.0
def _calcR2(self, ss_err, ss_tot):
return 1.0 - ss_err / ss_tot
def _calcStdDev(self, ss_err, residuals):
return numpy.sqrt(ss_err / len(residuals))
def _calcSS_err(self, residuals):
return numpy.sum(numpy.square(residuals))
def _calcSS_total(self):
v = numpy.array([])
for data_set in self.data_sets:
v = numpy.hstack((v, data_set.v))
v_mean = v.mean()
ss_tot = numpy.array([])
for data_set in self.data_sets:
ss_tot = numpy.hstack((ss_tot, data_set.getWeightedResiduals(mean_v=v_mean)))
return numpy.sum(numpy.square(ss_tot))
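# A self-contained numeric sketch (made-up data) of the goodness-of-fit quantities the
# helpers above compute: SS_err is the sum of squared residuals, SS_tot the total sum of
# squares about the mean of the observations, and R^2 = 1 - SS_err / SS_tot.
def _example_r_squared():
    observed = numpy.array([1.0, 2.0, 3.0, 4.0])
    predicted = numpy.array([1.1, 1.9, 3.2, 3.8])
    ss_err = numpy.sum(numpy.square(observed - predicted))        # 0.10
    ss_tot = numpy.sum(numpy.square(observed - observed.mean()))  # 5.0
    return 1.0 - ss_err / ss_tot                                  # 0.98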
class scipy_leastsq(Algorithm):
def __init__(self):
Algorithm.__init__(self)
self.ftol = 1.49012e-08
self.xtol = 1.49012e-08
self.gtol = 0.0
self.maxfev = 0
self.factor = 100
self.diag = None
def optimize(self, parameters):
x0 = [i.init for i in parameters]
return leastsq(self.objFunc, x0, Dfun=None, full_output=1, ftol=self.ftol, xtol=self.xtol, gtol=self.gtol, maxfev=self.maxfev, factor=self.factor, diag=self.diag)
def createResultsObject(self, result):
r = pod_result.Result(self.name, self.data_sets, result)
r.parameters = self.parameters
r.parameter_names = self.parameter_names
r.fitted_parameters = result[0]
r.cov_matrix = result[1]
info_dict = result[2]
r.num_func_evals = info_dict['nfev']
r.residuals = info_dict['fvec']
r.message = result[3]
r.success = result[4] in [1, 2, 3, 4]
if (result[0] < 0).any():
r.success = False
if not r.success:
return r
r.n = self._calcNumberOfObservations()
r.deg_freedom_t = self._calcDegOfFreedomT(r.n)
r.deg_freedom_e = self._calcDegOfFreedomE(r.n, len(r.fitted_parameters))
r.ss_tot = self._calcSS_total()
r.ss_err = self._calcSS_err(r.residuals)
r.r2 = self._calcR2(r.ss_err, r.ss_tot)
r.r2_adj = self._calcR2(r.ss_err / r.deg_freedom_e, r.ss_tot / r.deg_freedom_t)
r.std_dev = self._calcStdDev(r.ss_err, r.residuals)
r.std_err = self._calcStdErr(r.cov_matrix, r.ss_err, r.n, len(r.fitted_parameters))
return r
def _calcStdErr(self, cov_matrix, ss_err, n, p):
if cov_matrix is None:
return
return numpy.sqrt(ss_err / (n - p) * numpy.diag(cov_matrix))
class robust_biweight(scipy_leastsq):
def setFitter(self, fitter):
self.fitter = fitter
for dataset in self.fitter.data_sets:
dataset.weighter = Robust()
def optimize(self, parameters):
# print "Start optimization"
x0 = [i.init for i in parameters]
res = leastsq(self.objFunc, x0, Dfun=None, full_output=1, ftol=self.ftol, xtol=self.xtol, gtol=self.gtol, maxfev=self.maxfev, factor=self.factor, diag=self.diag)
iterations = 0
parameters_unchanged = 0
while(True):
x0 = res[0]
# print "Adjust weighting"
for ds in self.fitter.data_sets:
v_cap = ds.getSimulatedY(x0)
e = ds.getResiduals(x0)
ds.weighter.adjustWeighting(v_cap, e)
# print 'Re-optimize'
res = leastsq(self.objFunc, x0, Dfun=None, full_output=1, ftol=self.ftol, xtol=self.xtol, gtol=self.gtol, maxfev=self.maxfev, factor=self.factor, diag=self.diag)
# if (res[0] == x0).all():
if self.parameters_unchanged(res[0], x0):
parameters_unchanged += 1
if parameters_unchanged > 5:
print "Solution converged after {} iterations".format(iterations-5)
break
else:
parameters_unchanged = 0
iterations += 1
            # If we don't have convergence after 200 iterations (where weights stop changing)
            # there is no point in further iterations.
if iterations >= 200:
print "Max iterations ({}) reached before convergence".format(iterations)
break
if (res[0] < 0).any():
print "No solution found after {} iterations (negative parameters)".format(iterations)
break
return res
def parameters_unchanged(self, a, b):
"""
        Checks whether the elements of a and b agree to within the first 5 decimal places
"""
sig = 5
return (
(a == b).all()
or ((a*10**sig).astype(numpy.int32) == (b*10**sig).astype(numpy.int32)).all()
)
def _calcNumberOfObservations(self):
n = 0
for data_set in self.data_sets:
wi = data_set.weighter.wi
Wi = data_set.weighter.Wi
n += numpy.sum(Wi/wi)
return n
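# A hedged sketch of the kind of reweighting a Robust (Tukey biweight) weighter applies;
# the tuning constant 4.685 and the MAD-based scale below are standard textbook choices,
# not values read from residuals.Robust itself.
def _example_biweight_weights(residuals, c=4.685):
    mad = numpy.median(numpy.abs(residuals - numpy.median(residuals)))
    scale = mad / 0.6745 if mad > 0 else 1.0
    u = residuals / (c * scale)
    return numpy.where(numpy.abs(u) < 1.0, (1.0 - u ** 2) ** 2, 0.0)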
class lmfit_minimize(Algorithm):
def __init__(self):
Algorithm.__init__(self)
self.kws = {}
        self.kws['engine'] = 'leastsq'  # one of {'leastsq', 'anneal', 'lbfgsb'}
def funcWrapper(self, parameters):
p = [parameters[v].value for v in parameters]
return self.objFunc(p)
def buildLmfitParameters(self, parameters):
lp = Parameters()
for p in parameters:
lp.add(p.name, value=p.init, min=p.min, max=p.max)
for k in p.kws:
setattr(lp[p.name], k, p.kws[k])
return lp
def optimize(self, parameters):
self.lmfit_parameters = self.buildLmfitParameters(parameters)
return minimize(self.funcWrapper, self.lmfit_parameters, **self.kws)
def createResultsObject(self, result):
# lmfit_py's results contains stuff that cannot be pickled
r = pod_result.Result(self.name, self.data_sets, None)
r.parameters = self.parameters
r.lmfit_parameters = self.lmfit_parameters
r.parameter_names = self.parameter_names
r.fitted_parameters = []
for pn in r.parameter_names:
r.fitted_parameters.append(r.lmfit_parameters[pn].value)
r.std_err = None
if result.errorbars:
r.std_err = []
for pn in r.parameter_names:
r.std_err.append(r.lmfit_parameters[pn].stderr)
r.num_func_evals = result.nfev
r.residuals = result.residual
if result.errorbars:
r.cov_matrix = result.covar
r.covar_scaled = result.scale_covar
r.message = result.lmdif_message
r.success = result.success
if not r.success:
return r
r.n = self._calcNumberOfObservations()
r.deg_freedom_t = self._calcDegOfFreedomT(r.n)
r.deg_freedom_e = self._calcDegOfFreedomE(r.n, len(r.fitted_parameters))
r.ss_tot = self._calcSS_total()
r.ss_err = self._calcSS_err(r.residuals)
r.r2 = self._calcR2(r.ss_err, r.ss_tot)
r.r2_adj = self._calcR2(r.ss_err / r.deg_freedom_e, r.ss_tot / r.deg_freedom_t)
r.std_dev = self._calcStdDev(r.ss_err, r.residuals)
return r
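if __name__ == '__main__':
    # Hedged usage sketch: _ExampleParameter and _ExampleDataSet are stand-ins that only
    # provide the attributes the algorithms above actually touch (.name, .init, .v and
    # getWeightedResiduals); the real project classes live elsewhere and are not shown here.
    class _ExampleParameter(object):
        def __init__(self, name, init):
            self.name, self.init = name, init

    class _ExampleDataSet(object):
        def __init__(self, x, v):
            self.x, self.v = x, v

        def getWeightedResiduals(self, p=None, mean_v=None):
            if mean_v is not None:
                return self.v - mean_v
            slope, intercept = p
            return self.v - (slope * self.x + intercept)

    x = numpy.linspace(0.0, 10.0, 20)
    algo = scipy_leastsq()
    algo.setName('example_fit')
    algo.setDataSets([_ExampleDataSet(x, 2.0 * x + 1.0)])
    fitted = algo.optimize([_ExampleParameter('slope', 1.0), _ExampleParameter('intercept', 0.0)])[0]
    print(fitted)  # approximately [2.0, 1.0]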
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""A RunConfig subclass with TPU support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import numpy as np
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=protected-access
_TF_CONFIG_ENV = run_config_lib._TF_CONFIG_ENV
_SERVICE_KEY = run_config_lib._SERVICE_KEY
_TPU_WORKER_JOB_NAME = 'tpu_worker_job_name'
_NUM_CORES_PER_HOST = 8
# pylint: enable=protected-access
# TODO(b/72511246) Provide a simplified api to configure model parallelism.
class TPUConfig(
collections.namedtuple('TPUConfig', [
'iterations_per_loop',
'num_shards',
'computation_shape',
'per_host_input_for_training',
'tpu_job_name',
'initial_infeed_sleep_secs',
])):
r"""TPU related configuration required by `TPUEstimator`.
Args:
iterations_per_loop: This is the number of train steps running in TPU
system before returning to CPU host for each `Session.run`. This means
global step is increased `iterations_per_loop` times in one `Session.run`.
It is recommended to be set as number of global steps for next checkpoint.
num_shards: The number of model replicas in the system. For
non-model-parallelism case, this number equals the total number of TPU
cores. For model-parallelism, the total number of TPU cores equals
product(computation_shape) * num_shards.
computation_shape: A list of size 3 which describes the shape of a model
replica's block of cores. This is required by model-parallelism which
enables partitioning the model to multiple cores. For example, [2, 2, 1]
means the model
is partitioned across 4 cores which span two cores in both x and y
coordinates. Set it to `None` for non-model-parallelism. Please refer to
${tf.contrib.tpu.TopologyProto} for the geometry of a TPU mesh.
per_host_input_for_training: If `True`, `input_fn` is invoked Per-Host
rather than Per-Core. With Per-Host input pipeline deployment, `input_fn`
is invoked once on each host. With Per-Core input pipeline deployment, it
is invoked once for each core. To be precise, with a global batch size
`train_batch_size` in `TPUEstimator` constructor, the batch size for each
shard is `train_batch_size` // #hosts. With Per-Core input pipeline
deployment, the shard batch size is `train_batch_size` // #cores.
tpu_job_name: The name of the TPU job. Typically, this name is auto-inferred
within TPUEstimator, however when using ClusterSpec propagation in more
esoteric cluster configurations, you may need to specify the job name as a
string.
initial_infeed_sleep_secs: The number of seconds the infeed thread should
wait before enqueueing the first batch. This helps avoid timeouts for
models that require a long compilation time.
Raises:
    ValueError: If `computation_shape` or `num_shards` are invalid.
"""
def __new__(cls,
iterations_per_loop=2,
num_shards=2,
computation_shape=None,
per_host_input_for_training=True,
tpu_job_name=None,
initial_infeed_sleep_secs=None):
# Check iterations_per_loop.
util_lib.check_positive_integer(iterations_per_loop,
'TPUConfig iterations_per_loop')
# Check num_shards.
util_lib.check_positive_integer(num_shards, 'TPUConfig num_shards')
# Check computation_shape
if computation_shape is not None and len(computation_shape) != 3:
raise ValueError(
'computation_shape must be a list with length 3 or None; got {}'.
format(str(computation_shape)))
if computation_shape is not None:
computation_shape_array = np.asarray(computation_shape, dtype=np.int32)
# This prevents any computation being replicated across multiple hosts, so
# that each host feeds the same number of computations.
if any(computation_shape_array < 1) or any(computation_shape_array > 2):
raise ValueError('computation_shape elements can only be 1 or 2; got '
'computation_shape={}'.format(computation_shape))
max_replicas_per_host = (
_NUM_CORES_PER_HOST // np.prod(computation_shape_array))
if num_shards > max_replicas_per_host and (
num_shards % max_replicas_per_host != 0):
raise ValueError(
'{0} shards can not be evenly distributed across'
' multiple hosts. Each shard needs {1} cores and each'
' host has {2} cores. Thus {0} shards needs {3} hosts.'
' Please adjust num shards so that num_shards is'
' divisible by {4} or <= {4}.'.format(
num_shards, np.prod(computation_shape), _NUM_CORES_PER_HOST,
num_shards / max_replicas_per_host, max_replicas_per_host))
# Check initial_infeed_sleep_secs.
if initial_infeed_sleep_secs:
util_lib.check_positive_integer(initial_infeed_sleep_secs,
'TPUConfig initial_infeed_sleep_secs')
tpu_job_name = tpu_job_name or _get_tpu_job_name_from_tf_config()
return super(TPUConfig, cls).__new__(
cls,
iterations_per_loop=iterations_per_loop,
num_shards=num_shards,
computation_shape=computation_shape,
per_host_input_for_training=per_host_input_for_training,
tpu_job_name=tpu_job_name,
initial_infeed_sleep_secs=initial_infeed_sleep_secs)
class RunConfig(run_config_lib.RunConfig):
"""RunConfig with TPU support."""
def __init__(self,
tpu_config=None,
evaluation_master=None,
master=None,
**kwargs):
"""Constructs a RunConfig.
Args:
tpu_config: the TPUConfig that specifies TPU-specific configuration.
evaluation_master: a string. The address of the master to use for eval.
Defaults to master if not set.
master: a string. The address of the master to use for training.
**kwargs: keyword config parameters.
"""
super(RunConfig, self).__init__(**kwargs)
self._tpu_config = tpu_config or TPUConfig()
    # If user sets master and/or evaluation_master explicitly, including empty
# string '', take it. Otherwise, take the values set by parent class.
if master is not None:
self._master = master
if evaluation_master is not None:
self._evaluation_master = evaluation_master
elif (not self._evaluation_master and
self.task_type != run_config_lib.TaskType.EVALUATOR):
# If the task type is EVALUATOR, it means some cluster manager sets the
# TF_CONFIG. In that case, we respect the configuration in TF_CONFIG.
#
# Otherwise, it means user executes the code without external cluster
# manager. For that, we optimize the user experience by setting
# evaluation_master to master, unless user overwrites it.
self._evaluation_master = self._master
@property
def evaluation_master(self):
return self._evaluation_master
@property
def master(self):
return self._master
@property
def tpu_config(self):
return self._tpu_config
def replace(self, **kwargs):
if 'tpu_config' not in kwargs:
return super(RunConfig, self).replace(**kwargs)
tpu_config = kwargs.pop('tpu_config')
new_instance = super(RunConfig, self).replace(**kwargs)
new_instance._tpu_config = tpu_config # pylint: disable=protected-access
return new_instance
def _get_tpu_job_name_from_tf_config():
"""Extracts the TPU job name from TF_CONFIG env variable."""
# TODO(xiejw): Extends this to support both TF_CONFIG env variable and cluster
# spec propagation.
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
tpu_job_name = tf_config.get(_SERVICE_KEY, {}).get(_TPU_WORKER_JOB_NAME)
if tpu_job_name:
logging.info('Load TPU job name from TF_CONFIG: %s', tpu_job_name)
return tpu_job_name
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the classes to build a ConversionElectrode.
"""
from typing import Iterable, Dict
from dataclasses import dataclass
from monty.dev import deprecated
from scipy.constants import N_A
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.reaction_calculator import BalancedReaction
from pymatgen.apps.battery.battery_abc import AbstractElectrode, AbstractVoltagePair
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.units import Charge, Time
from pymatgen.entries.computed_entries import ComputedEntry
@dataclass
class ConversionElectrode(AbstractElectrode):
"""
    Class representing a ConversionElectrode. Since it is a dataclass,
    this object can be constructed directly from its attributes.
    However, it is usually easier to construct a ConversionElectrode using one of the
    classmethod constructors provided.
Attribute:
voltage_pairs: The voltage pairs making up the Conversion Electrode.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
_initial_comp_formula: Starting composition for ConversionElectrode represented
as a string/formula.
"""
_initial_comp_formula: str
@property
def initial_comp(self) -> Composition:
"""
The pymatgen Composition representation of the initial composition
"""
return Composition(self._initial_comp_formula)
@classmethod
def from_composition_and_pd(cls, comp, pd, working_ion_symbol="Li", allow_unstable=False):
"""
Convenience constructor to make a ConversionElectrode from a
composition and a phase diagram.
Args:
comp:
Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
pd:
A PhaseDiagram of the relevant system (e.g., Li-Fe-F)
working_ion_symbol:
Element symbol of working ion. Defaults to Li.
allow_unstable:
Allow compositions that are unstable
"""
working_ion = Element(working_ion_symbol)
entry = None
working_ion_entry = None
for e in pd.stable_entries:
if e.composition.reduced_formula == comp.reduced_formula:
entry = e
elif e.is_element and e.composition.reduced_formula == working_ion_symbol:
working_ion_entry = e
if not allow_unstable and not entry:
raise ValueError("Not stable compound found at composition {}.".format(comp))
profile = pd.get_element_profile(working_ion, comp)
        # Need to reverse because voltage goes from most charged to most
# discharged.
profile.reverse()
if len(profile) < 2:
return None
working_ion_entry = working_ion_entry
working_ion = working_ion_entry.composition.elements[0].symbol
normalization_els = {}
for el, amt in comp.items():
if el != Element(working_ion):
normalization_els[el] = amt
framework = comp.as_dict()
if working_ion in framework:
framework.pop(working_ion)
framework = Composition(framework)
vpairs = [
ConversionVoltagePair.from_steps(
profile[i],
profile[i + 1],
normalization_els,
framework_formula=framework.reduced_formula,
)
for i in range(len(profile) - 1)
]
return cls(
voltage_pairs=vpairs,
working_ion_entry=working_ion_entry,
_initial_comp_formula=comp.reduced_formula,
_framework_formula=framework.reduced_formula,
)
@classmethod
def from_composition_and_entries(cls, comp, entries_in_chemsys, working_ion_symbol="Li", allow_unstable=False):
"""
Convenience constructor to make a ConversionElectrode from a
composition and all entries in a chemical system.
Args:
comp: Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
entries_in_chemsys: Sequence containing all entries in a
chemical system. E.g., all Li-Fe-F containing entries.
working_ion_symbol: Element symbol of working ion. Defaults to Li.
allow_unstable: If True, allow any composition to be used as the
starting point of a conversion voltage curve, this is useful
for comparing with insertion electrodes
"""
pd = PhaseDiagram(entries_in_chemsys)
return cls.from_composition_and_pd(comp, pd, working_ion_symbol, allow_unstable)
def get_sub_electrodes(self, adjacent_only=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set true
Returns:
A list of ConversionElectrode objects
"""
# voltage_pairs = vpairs, working_ion_entry = working_ion_entry,
# _initial_comp_formula = comp.reduced_formula, _framework_formula = framework.reduced_formula
if adjacent_only:
return [
self.__class__(
voltage_pairs=self.voltage_pairs[i : i + 1],
working_ion_entry=self.working_ion_entry,
_initial_comp_formula=self._initial_comp_formula,
_framework_formula=self._framework_formula,
)
for i in range(len(self.voltage_pairs))
]
sub_electrodes = []
for i in range(len(self.voltage_pairs)):
for j in range(i, len(self.voltage_pairs)):
sub_electrodes.append(
self.__class__(
voltage_pairs=self.voltage_pairs[i : j + 1],
working_ion_entry=self.working_ion_entry,
_initial_comp_formula=self._initial_comp_formula,
_framework_formula=self._framework_formula,
)
)
return sub_electrodes
def is_super_electrode(self, conversion_electrode):
"""
Checks if a particular conversion electrode is a sub electrode of the
current electrode. Starting from a more lithiated state may result in
a subelectrode that is essentially on the same path. For example, a
ConversionElectrode formed by starting from an FePO4 composition would
be a super_electrode of a ConversionElectrode formed from an LiFePO4
composition.
"""
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = {
rxn1.all_comp[i].reduced_formula for i in range(len(rxn1.all_comp)) if abs(rxn1.coeffs[i]) > 1e-5
}
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = {
rxn2.all_comp[i].reduced_formula for i in range(len(rxn2.all_comp)) if abs(rxn2.coeffs[i]) > 1e-5
}
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __eq__(self, conversion_electrode):
"""
Check if two electrodes are exactly the same:
"""
if len(self) != len(conversion_electrode):
return False
for pair1 in conversion_electrode:
rxn1 = pair1.rxn
all_formulas1 = {
rxn1.all_comp[i].reduced_formula for i in range(len(rxn1.all_comp)) if abs(rxn1.coeffs[i]) > 1e-5
}
for pair2 in self:
rxn2 = pair2.rxn
all_formulas2 = {
rxn2.all_comp[i].reduced_formula for i in range(len(rxn2.all_comp)) if abs(rxn2.coeffs[i]) > 1e-5
}
if all_formulas1 == all_formulas2:
break
else:
return False
return True
def __hash__(self):
return 7
def __str__(self):
return self.__repr__()
def __repr__(self):
output = [
"Conversion electrode with formula {} and nsteps {}".format(
self.initial_comp.reduced_formula, self.num_steps
),
"Avg voltage {} V, min voltage {} V, max voltage {} V".format(
self.get_average_voltage(), self.min_voltage, self.max_voltage
),
"Capacity (grav.) {} mAh/g, capacity (vol.) {} Ah/l".format(
self.get_capacity_grav(), self.get_capacity_vol()
),
"Specific energy {} Wh/kg, energy density {} Wh/l".format(
self.get_specific_energy(), self.get_energy_density()
),
]
return "\n".join(output)
def get_summary_dict(self, print_subelectrodes=True) -> Dict:
"""
Generate a summary dict.
        Populates the summary dict with the basic information from the parent method, then adds more information.
        Since the parent method calls self.get_summary_dict(print_subelectrodes=True) for the subelectrodes,
        the current method will be called from within super().get_summary_dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
            A summary of this electrode's properties in dict format.
"""
d = super().get_summary_dict(print_subelectrodes=print_subelectrodes)
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self.voltage_pairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i, v in enumerate(rxn.coeffs):
if abs(v) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(v) > 1e-5 and rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
return d
@deprecated(
replacement=get_summary_dict,
message="Name and logic changed, will be as_dict_summary will be removed in the futurn.",
)
def as_dict_summary(self, print_subelectrodes=True):
"""
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
            a summary of this electrode's properties in dictionary format
"""
d = {}
framework_comp = Composition(
{k: v for k, v in self.initial_comp.items() if k.symbol != self.working_ion.symbol}
)
d["framework"] = framework_comp.to_data_dict
d["framework_pretty"] = framework_comp.reduced_formula
d["average_voltage"] = self.get_average_voltage()
d["max_voltage"] = self.max_voltage
d["min_voltage"] = self.min_voltage
d["max_delta_volume"] = self.max_delta_volume
d["max_instability"] = 0
d["max_voltage_step"] = self.max_voltage_step
d["nsteps"] = self.num_steps
d["capacity_grav"] = self.get_capacity_grav()
d["capacity_vol"] = self.get_capacity_vol()
d["energy_grav"] = self.get_specific_energy()
d["energy_vol"] = self.get_energy_density()
d["working_ion"] = self.working_ion.symbol
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self.voltage_pairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i, v in enumerate(rxn.coeffs):
if abs(v) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(v) > 1e-5 and rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
d["fracA_charge"] = min(frac)
d["fracA_discharge"] = max(frac)
d["nsteps"] = self.num_steps
if print_subelectrodes:
def f_dict(c):
return c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=True)))
d["all_pairs"] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=False)))
return d
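# Hedged usage sketch: the entries would normally come from a database of computed entries
# for the relevant chemical system (e.g. all Li-Fe-F entries); nothing is fetched here, the
# caller supplies them. The methods used are defined above or inherited from AbstractElectrode.
def _example_conversion_electrode(entries_in_chemsys):
    electrode = ConversionElectrode.from_composition_and_entries(
        Composition("FeF3"), entries_in_chemsys, working_ion_symbol="Li"
    )
    return electrode.get_average_voltage(), electrode.get_capacity_grav()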
@dataclass
class ConversionVoltagePair(AbstractVoltagePair):
"""
A VoltagePair representing a Conversion Reaction with a defined voltage.
Typically not initialized directly but rather used by ConversionElectrode.
Attributes:
rxn (BalancedReaction): BalancedReaction for the step
voltage (float): Voltage for the step
mAh (float): Capacity of the step
vol_charge (float): Volume of charged state
vol_discharge (float): Volume of discharged state
mass_charge (float): Mass of charged state
mass_discharge (float): Mass of discharged state
frac_charge (float): Fraction of working ion in the charged state
frac_discharge (float): Fraction of working ion in the discharged state
entries_charge ([ComputedEntry]): Entries in the charged state
entries_discharge ([ComputedEntry]): Entries in discharged state
working_ion_entry (ComputedEntry): Entry of the working ion.
"""
rxn: BalancedReaction
entries_charge: Iterable[ComputedEntry]
entries_discharge: Iterable[ComputedEntry]
@classmethod
def from_steps(cls, step1, step2, normalization_els, framework_formula=None):
"""
Creates a ConversionVoltagePair from two steps in the element profile
from a PD analysis.
Args:
step1: Starting step
step2: Ending step
normalization_els: Elements to normalize the reaction by. To
ensure correct capacities.
"""
working_ion_entry = step1["element_reference"]
working_ion = working_ion_entry.composition.elements[0].symbol
working_ion_valence = max(Element(working_ion).oxidation_states)
voltage = (-step1["chempot"] + working_ion_entry.energy_per_atom) / working_ion_valence
mAh = (
(step2["evolution"] - step1["evolution"])
* Charge(1, "e").to("C")
* Time(1, "s").to("h")
* N_A
* 1000
* working_ion_valence
)
licomp = Composition(working_ion)
prev_rxn = step1["reaction"]
reactants = {comp: abs(prev_rxn.get_coeff(comp)) for comp in prev_rxn.products if comp != licomp}
curr_rxn = step2["reaction"]
products = {comp: abs(curr_rxn.get_coeff(comp)) for comp in curr_rxn.products if comp != licomp}
reactants[licomp] = step2["evolution"] - step1["evolution"]
rxn = BalancedReaction(reactants, products)
for el, amt in normalization_els.items():
if rxn.get_el_amount(el) > 1e-6:
rxn.normalize_to_element(el, amt)
break
prev_mass_dischg = (
sum([prev_rxn.all_comp[i].weight * abs(prev_rxn.coeffs[i]) for i in range(len(prev_rxn.all_comp))]) / 2
)
vol_charge = sum(
[
abs(prev_rxn.get_coeff(e.composition)) * e.structure.volume
for e in step1["entries"]
if e.composition.reduced_formula != working_ion
]
)
mass_discharge = (
sum([curr_rxn.all_comp[i].weight * abs(curr_rxn.coeffs[i]) for i in range(len(curr_rxn.all_comp))]) / 2
)
mass_charge = prev_mass_dischg
mass_discharge = mass_discharge
vol_discharge = sum(
[
abs(curr_rxn.get_coeff(e.composition)) * e.structure.volume
for e in step2["entries"]
if e.composition.reduced_formula != working_ion
]
)
totalcomp = Composition({})
for comp in prev_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(prev_rxn.get_coeff(comp))
frac_charge = totalcomp.get_atomic_fraction(Element(working_ion))
totalcomp = Composition({})
for comp in curr_rxn.products:
if comp.reduced_formula != working_ion:
totalcomp += comp * abs(curr_rxn.get_coeff(comp))
frac_discharge = totalcomp.get_atomic_fraction(Element(working_ion))
rxn = rxn
entries_charge = step2["entries"]
entries_discharge = step1["entries"]
return cls(
rxn=rxn,
voltage=voltage,
mAh=mAh,
vol_charge=vol_charge,
vol_discharge=vol_discharge,
mass_charge=mass_charge,
mass_discharge=mass_discharge,
frac_charge=frac_charge,
frac_discharge=frac_discharge,
entries_charge=entries_charge,
entries_discharge=entries_discharge,
working_ion_entry=working_ion_entry,
_framework_formula=framework_formula,
)
def __repr__(self):
output = [
"Conversion voltage pair with working ion {}".format(self.working_ion_entry.composition.reduced_formula),
"Reaction : {}".format(self.rxn),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"frac_charge = {}, frac_discharge = {}".format(self.frac_charge, self.frac_discharge),
"mass_charge = {}, mass_discharge = {}".format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}".format(self.vol_charge, self.vol_discharge),
]
return "\n".join(output)
def __str__(self):
return self.__repr__()
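if __name__ == "__main__":
    # Sanity check of the unit conversion used in ConversionVoltagePair.from_steps: one mole
    # of singly charged working ions corresponds to the Faraday constant, roughly 26801 mAh.
    print(Charge(1, "e").to("C") * Time(1, "s").to("h") * N_A * 1000)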
|
|
# -*- encoding: utf-8 -*-
import json
import os
import unittest
from cas_client import CASClient, CASResponse
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
try:
import mock
except ImportError:
from unittest import mock
class TestCase(unittest.TestCase):
response_text = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>jott</cas:user>
<cas:attributes>
<cas:email>jott@purdue.edu</cas:email>
<cas:i2a2characteristics>0,3592,2000</cas:i2a2characteristics>
<cas:lastname>Ott</cas:lastname>
<cas:firstname>Jeffrey A</cas:firstname>
<cas:fullname>Jeffrey A Ott</cas:fullname>
<cas:puid>0012345678</cas:puid>
</cas:attributes>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
slo_text = """
<samlp:LogoutRequest
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="[RANDOM ID]"
Version="2.0"
IssueInstant="[CURRENT DATE/TIME]">
<saml:NameID>@NOT_USED@</saml:NameID>
<samlp:SessionIndex>[SESSION IDENTIFIER]</samlp:SessionIndex>
</samlp:LogoutRequest>
"""
slo_text_2 = """
<samlp:LogoutRequest
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="935a2d0c-4026-481e-be3d-20a1b2cdd553"
Version="2.0"
IssueInstant="2016-04-08 00:40:55 +0000">
<saml:NameID>@NOT_USED@</saml:NameID>
<samlp:SessionIndex>ST-14600760351898-0B3lSFt2jOWSbgQ377B4CtbD9uq0MXR9kG23vAuH</samlp:SessionIndex>
</samlp:LogoutRequest>
"""
private_key_filepath = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'test_private_key.pem',
)
def test_success(self):
response = CASResponse(self.response_text)
self.assertTrue(response.success)
self.assertEqual(response.attributes, {
u'i2a2characteristics': u'0,3592,2000',
u'puid': u'0012345678',
u'firstname': u'Jeffrey A',
u'lastname': u'Ott',
u'fullname': u'Jeffrey A Ott',
u'email': u'jott@purdue.edu',
})
self.assertEqual(response.response_type, 'authenticationSuccess')
self.assertEqual(response.user, 'jott')
def test_failure(self):
response = CASResponse(None)
self.assertFalse(response.success)
self.assertEqual(response.response_type, 'noResponse')
def test_perform_service_validate(self):
cas_client = CASClient('https://dummy.url')
assert not cas_client.headers
with mock.patch('cas_client.CASClient._perform_get') as m:
m.return_value = self.response_text
response = cas_client.perform_service_validate(
ticket='FOO',
service_url='BAR',
)
m.assert_called_once_with(
'https://dummy.url/cas/serviceValidate?ticket=FOO&service=BAR',
headers=None
)
self.assertTrue(response.success)
self.assertEqual(response.attributes, {
u'i2a2characteristics': u'0,3592,2000',
u'puid': u'0012345678',
u'firstname': u'Jeffrey A',
u'lastname': u'Ott',
u'fullname': u'Jeffrey A Ott',
u'email': u'jott@purdue.edu',
})
self.assertEqual(response.response_type, 'authenticationSuccess')
self.assertEqual(response.user, 'jott')
def test_perform_service_validate_separate_url(self):
cas_client = CASClient(
'https://dummy.url', validate_url='https://validate.url')
assert not cas_client.headers
with mock.patch('cas_client.CASClient._perform_get') as m:
m.return_value = self.response_text
response = cas_client.perform_service_validate(
ticket='FOO',
service_url='BAR',
)
m.assert_called_once_with(
'https://validate.url/cas/serviceValidate?ticket=FOO&service=BAR',
headers=None
)
self.assertTrue(response.success)
self.assertEqual(response.attributes, {
u'i2a2characteristics': u'0,3592,2000',
u'puid': u'0012345678',
u'firstname': u'Jeffrey A',
u'lastname': u'Ott',
u'fullname': u'Jeffrey A Ott',
u'email': u'jott@purdue.edu',
})
self.assertEqual(response.response_type, 'authenticationSuccess')
self.assertEqual(response.user, 'jott')
def test_perform_service_validate_headers_call(self):
class MockResponse(object):
text = self.response_text
cas_client = CASClient('https://dummy.url')
assert not cas_client.headers
with mock.patch('requests.get') as m:
m.return_value = MockResponse()
cas_client.perform_service_validate(
ticket='FOO',
service_url='BAR',
headers={'baz': 'quux'},
)
m.assert_called_with(
'https://dummy.url/cas/serviceValidate?ticket=FOO&service=BAR',
headers={'baz': 'quux'},
verify=False,
)
def test_perform_service_validate_headers_init(self):
class MockResponse(object):
text = self.response_text
cas_client = CASClient('https://dummy.url', headers={'baz': 'quux'})
assert cas_client.headers == {'baz': 'quux'}
with mock.patch('requests.get') as m:
m.return_value = MockResponse()
cas_client.perform_service_validate(
ticket='FOO',
service_url='BAR',
)
m.assert_called_with(
'https://dummy.url/cas/serviceValidate?ticket=FOO&service=BAR',
headers={'baz': 'quux'},
verify=False,
)
def test_get_destroy_other_sessions_url(self):
cas_client = CASClient('https://dummy.url')
service_url = 'https://app.url'
url = cas_client.get_destroy_other_sessions_url(
service_url=service_url
)
self.assertEqual(
url,
'https://dummy.url/cas/destroy-other-sessions?service=https://app.url'
)
def test_get_login_url(self):
cas_client = CASClient('https://dummy.url')
service_url = 'https://app.url'
url = cas_client.get_login_url(service_url=service_url)
self.assertEqual(url, 'https://dummy.url/cas/login?service=https://app.url')
def test_get_logout_url(self):
cas_client = CASClient('https://dummy.url')
service_url = 'https://app.url'
url = cas_client.get_logout_url(service_url=service_url)
self.assertEqual(url, 'https://dummy.url/cas/logout?service=https://app.url')
def test_parse_logout_request(self):
cas_client = CASClient('https://dummy.url')
parsed_message = cas_client.parse_logout_request(self.slo_text)
self.assertEqual(parsed_message, {
'ID': '[RANDOM ID]',
'IssueInstant': '[CURRENT DATE/TIME]',
'Version': '2.0',
'session_index': '[SESSION IDENTIFIER]',
'xmlns:saml': 'urn:oasis:names:tc:SAML:2.0:assertion',
'xmlns:samlp': 'urn:oasis:names:tc:SAML:2.0:protocol',
})
def test_parse_logout_request_2(self):
cas_client = CASClient('https://dummy.url')
parsed_message = cas_client.parse_logout_request(self.slo_text_2)
self.assertEqual(parsed_message, {
'ID': '935a2d0c-4026-481e-be3d-20a1b2cdd553',
'IssueInstant': '2016-04-08 00:40:55 +0000',
'Version': '2.0',
'session_index': 'ST-14600760351898-0B3lSFt2jOWSbgQ377B4CtbD9uq0MXR9kG23vAuH',
'xmlns:saml': 'urn:oasis:names:tc:SAML:2.0:assertion',
'xmlns:samlp': 'urn:oasis:names:tc:SAML:2.0:protocol',
})
def test_get_api_url(self):
url = self._get_test_api_url()
query_string = url.partition('?')[-1]
query_parameters = {
key: value[0]
for key, value in parse_qs(query_string).items()
}
assert query_parameters == {
'at': (
'eyJhbmQiOiAiYW5vdGhlcl90aGluZyIsICJhdXRoZW50aWNhdG9yIjogIm15'
'X2NvbXBhbnlfbGRhcCIsICJ0aWNrZXQiOiAiQVRULTEyMzQiLCAieW91Ijog'
'InNob3VsZF9rbm93In0='
),
'ats': (
'FISMx+fVfKKzI160MQRMauKdeqBRzzg+Ihwh0WqhqcnW4d+S0IyrTg6/oY1a'
'wGvhBGrSMzOEBfYyihj5SxmLMr+xWm5Ndt+m0WcjuOR2GEwtEimIbbEQslCu'
'f+//tG2u3UacStBRctt/cWnIGlW9cIPlUgU4iVVQtpbC7DdJc9+2rwzN10jV'
'36JUwAWWT3iQseTiyMy+Bbuu1bzTcdtKvBdHTnCwcu1m9vkQraH/ZuVbYVMB'
'jZC1s5lXECLN+fnC00laglYmgQ1w59EoQIXuaaHFqgq+zRvRxm4r0ASG5F0D'
'bPT0fEDihQulSAbyOY5/6nhkFq6NYlJADKuGchFusk9D3Pcgs2KyEW3xvBb4'
'ZArn2oaI8sxjOYUXutf1xe5MBGy8oTW+3QbHVv+hzXOrwJXsbSz6bx3gmDYb'
'bDilhbRgPQeTH17IwqArrVgnjgcAMoDk6cTqU548S19KMc8B99pVZ7JMM5Ls'
'uKx/ZWUF0naXFeuEaFJ5TdaO6HhhiRhUAEwlnwTQwwJuR1VtcYx4z3Lb5NhN'
'CtH658M8acru4Dv4jV5NC3IPJcCijKGVjZQ0K6GrD863fr3usnH1gvnTzNgJ'
'1jijF4FmyIr8E9kpNM5Mk7D0AqSGCC2nZcu/r4+2rcLiq9XxViv3jpe44alQ'
'RjhkcqcbkcJvnhckfgjrU7w='
),
'service': 'https://example.com',
}
def test_get_auth_token_login_url(self):
cas_client = CASClient('https://dummy.url')
auth_token_ticket = 'AT-1234'
authenticator = 'my_company_ldap'
username = 'my_user'
service_url = 'https://example.com'
with open(self.private_key_filepath, 'r') as file_pointer:
private_key = file_pointer.read()
url = cas_client.get_auth_token_login_url(
auth_token_ticket=auth_token_ticket,
authenticator=authenticator,
private_key=private_key,
service_url=service_url,
username=username,
)
query_string = url.partition('?')[-1]
query_parameters = {
key: value[0]
for key, value in parse_qs(query_string).items()
}
assert query_parameters == {
'at': (
'eyJhdXRoZW50aWNhdG9yIjogIm15X2NvbXBhbnlfbGRhcCIsICJ0aWNrZXQi'
'OiAiQVQtMTIzNCIsICJ1c2VybmFtZSI6ICJteV91c2VyIn0='
),
'ats': (
'pZ3m58k8Xpd+TDlYb+VDV89TVGoPIAgsxDMNGtNLqzchg/EFy12NzVaUbVSz'
'1PNZdQ/klMrfvxzehLlFp9QkyfFoUS5pgUo9XXjpowWe0E9eKX5hBJjpmvD+'
'PhSMRXFOPUOLRohRX45aPqJ4mjh2MNP0mzKrRfoRoUT/6mmrvLRJu150rtnS'
'A5E4n0V4BeJXWIFYqqu8B4CP3fbg18HMB5g36P61m6I67kDmBLfTlmtrwvM5'
'Vh3r9q9HFGn1NGmdMTcqGwAqfrww2XuBBemTpcfvSLNhTf/nZ21042BDt0+J'
'TLNsGBxNKS39NznyOcf2g5XtscdJXcDcKan/eJI7WHNtpmJPzhA4H5wTuAm7'
'X0WgAN7hxmTYy3E0241j6Q1DNDuxvgkSMS7CJhD3p0Fp0kHsdCslLuqjMoou'
'THSshfJU6lvE4dc1vh3fdzKiAcmvMQ2RT4ACNQVwVYiE9UWu23D16yz08sV2'
'9kzlFTCTXT608tHMVCx1x7K959IxcRUFld314ooqJ5BgrK/2QqtZXS0w581f'
'8P5qViQoOrQ5gRiPZ/bT6eF24RLuKN78VEkak2z0B1aZqpEcG3wQC4qHeUaM'
'TgrihbVi6eIv7N5k6srSyGCAQ/9k7o53ZKG8MzkqMJq53AoEXNj8HNQxgO0D'
'OtFwXLMrlrFpmqPS5OcO9NM='
),
'service': 'https://example.com',
}
def test_acquire_auth_token_ticket_no_headers(self):
class MockResponse(object):
text = '{"ticket": "FOO"}'
cas_client = CASClient('https://dummy.url')
assert not cas_client.headers
with mock.patch('requests.post') as m:
m.return_value = MockResponse()
cas_client.acquire_auth_token_ticket()
m.assert_called_with(
'https://dummy.url/cas/api/auth_token_tickets',
data=None,
headers=None,
verify=False,
)
def test_acquire_auth_token_ticket_headers_call(self):
class MockResponse(object):
text = '{"ticket": "FOO"}'
cas_client = CASClient('https://dummy.url')
assert not cas_client.headers
with mock.patch('requests.post') as m:
m.return_value = MockResponse()
cas_client.acquire_auth_token_ticket(headers={'baz': 'quux'})
m.assert_called_with(
'https://dummy.url/cas/api/auth_token_tickets',
data=None,
headers={'baz': 'quux'},
verify=False,
)
def test_acquire_auth_token_ticket_headers_init(self):
class MockResponse(object):
text = '{"ticket": "FOO"}'
cas_client = CASClient('https://dummy.url', headers={'baz': 'quux'})
assert cas_client.headers == {'baz': 'quux'}
with mock.patch('requests.post') as m:
m.return_value = MockResponse()
cas_client.acquire_auth_token_ticket()
m.assert_called_with(
'https://dummy.url/cas/api/auth_token_tickets',
data=None,
headers={'baz': 'quux'},
verify=False,
)
def _get_test_api_url(self):
cas_client = CASClient('https://dummy.url')
api_resource = 'do_something_useful'
auth_token_ticket = 'ATT-1234'
authenticator = 'my_company_ldap'
with open(self.private_key_filepath, 'r') as file_pointer:
private_key = file_pointer.read()
service_url = 'https://example.com'
kwargs = {
'and': 'another_thing',
'you': 'should_know',
}
return cas_client.get_api_url(
api_resource=api_resource,
auth_token_ticket=auth_token_ticket,
authenticator=authenticator,
private_key=private_key,
service_url=service_url,
**kwargs
)
def test_perform_api_request(self):
class MockResponse(object):
text = '{"something": "useful"}'
success = True
cas_client = CASClient('https://dummy.url')
url = self._get_test_api_url()
service_ticket = 'ST-14600760351898-0B3lSFt2jOWSbgQ377B4CtbD9uq0MXR9kG23vAuH'
assert not cas_client.headers
with mock.patch('cas_client.CASClient._perform_post') as m:
m.return_value = MockResponse()
response = cas_client.perform_api_request(
url,
method='POST',
body={'st': service_ticket},
timeout=45
)
m.assert_called_once_with(
url,
data={'st': service_ticket},
headers=None,
timeout=45
)
print(response)
self.assertTrue(response.success)
self.assertEqual(json.loads(response.text), {
'something': 'useful'
})
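if __name__ == '__main__':
    # Allows running these tests directly with `python <this file>`; unittest discovery
    # will also collect the TestCase methods above automatically.
    unittest.main()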
|
|
import datetime
import json
import math
from string import Template
import sys
import traceback
import random
from asgiref.sync import async_to_sync
from EOSS.analyst.helpers import feature_expression_to_string
from EOSS.critic.critic import Critic
from EOSS.data_mining.api import DataMiningClient
from EOSS.data_mining.interface.ttypes import BinaryInputArchitecture, DiscreteInputArchitecture
from daphne_context.models import UserInformation, DialogueHistory
from EOSS.graphql.api import GraphqlClient
from EOSS.vassar.api import VASSARClient
def active_engineer_response(user_info: UserInformation, inputs, session_key):
vassar_client = VASSARClient(user_information=user_info)
# Check Vassar Status
vassar_status = async_to_sync(vassar_client.check_status)(user_info.eosscontext.vassar_request_queue_url, user_info.eosscontext.vassar_response_queue_url)
if vassar_status != 'ready':
print('--> INVALID VASSAR STATUS:', vassar_status)
return []
# Evaluate Mutated Architecture
arch = vassar_client.evaluate_architecture(inputs, user_info.eosscontext.vassar_request_queue_url)
arch['db_id'] = arch['id']
# Pass evaluated arch to critic
critic = Critic(user_info, session_key)
suggestion_list = critic.expert_critic(arch)
return suggestion_list
def generate_engineer_message(user_info, genome, session_key):
message = {}
if user_info.eosscontext.activecontext.show_arch_suggestions:
suggestion_list = active_engineer_response(user_info, genome, session_key)
if len(suggestion_list) == 0:
return {}
suggestion_list = parse_suggestions_list(suggestion_list)
message = {
'voice_message': 'The Live Recommender System has the following suggestions for your modified '
'architecture.',
'visual_message_type': ['list'],
'visual_message': [
{
'begin': 'The Live Recommender System has the following suggestions for your modified '
'architecture: ',
'list': suggestion_list
}
],
"writer": "daphne",
}
else:
message = {
'voice_message': 'The Live Recommender System has some suggestions for your modified architecture, '
'but you have chosen to not show them. Do you want to see them now?',
'visual_message_type': ['active_message'],
'visual_message': [
{
'message': 'The Live Recommender System has some suggestions for your modified architecture, '
'but you have chosen to not show them. Do you want to see them now?',
'setting': 'show_arch_suggestions'
}
],
"writer": "daphne",
}
DialogueHistory.objects.create(user_information=user_info,
voice_message=message["voice_message"],
visual_message_type=json.dumps(message["visual_message_type"]),
visual_message=json.dumps(message["visual_message"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
return message
def active_historian_response(user_info: UserInformation, inputs, session_key):
input_str = boolean_array_2_boolean_string(inputs)
design = {'inputs': input_str}
critic = Critic(user_info, session_key)
suggestion_list = critic.historian_critic(design)
return suggestion_list
def generate_historian_message(user_info, genome, session_key):
message = {}
if user_info.eosscontext.activecontext.show_arch_suggestions:
suggestion_list = active_historian_response(user_info, genome, session_key)
if len(suggestion_list) == 0:
return {}
suggestion_list = parse_suggestions_list(suggestion_list)
message = {
'voice_message': 'The Live Recommender System has the following suggestions for your modified '
'architecture.',
'visual_message_type': ['list'],
'visual_message': [
{
'begin': 'The Live Recommender System has the following suggestions for your modified '
'architecture: ',
'list': suggestion_list
}
],
"writer": "daphne",
}
else:
message = {
'voice_message': 'The Live Recommender System has some suggestions for your modified architecture, '
'but you have chosen to not show them. Do you want to see them now?',
'visual_message_type': ['active_message'],
'visual_message': [
{
'message': 'The Live Recommender System has some suggestions for your modified architecture, '
'but you have chosen to not show them. Do you want to see them now?',
'setting': 'show_arch_suggestions'
}
],
"writer": "daphne",
}
DialogueHistory.objects.create(user_information=user_info,
voice_message=message["voice_message"],
visual_message_type=json.dumps(message["visual_message_type"]),
visual_message=json.dumps(message["visual_message"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
return message
def active_analyst_response(user_info: UserInformation, session_key):
result = []
dm_client = DataMiningClient()
vassar_client = VASSARClient(user_information=user_info)
problem_id = user_info.eosscontext.problem_id
problem_type = vassar_client.get_problem_type(problem_id)
try:
# Start connection with data_mining
dm_client.startConnection()
behavioral = []
non_behavioral = []
dataset = vassar_client.get_dataset_architectures(problem_id, user_info.eosscontext.dataset_id)
if len(dataset) < 10:
raise ValueError("Could not run data mining: the number of samples is less than 10")
else:
utopiaPoint = [1, 0]
temp = []
maxObjectives = [0, 0]
# Find the maximum values of all objectives for normalization
for design in dataset:
outputs = design["outputs"]
for index, output in enumerate(outputs):
maxObjectives[index] = max(maxObjectives[index], output)
# Select the top N% archs based on the distance to the utopia point
for design in dataset:
outputs = design["outputs"]
id = design["id"]
dist = math.sqrt(((outputs[0] - utopiaPoint[0])/(maxObjectives[0] - utopiaPoint[0])) ** 2 + ((outputs[1] - utopiaPoint[1])/(maxObjectives[1] - utopiaPoint[1])) ** 2)
temp.append((id, dist))
# Sort the list based on the distance to the utopia point
temp = sorted(temp, key=lambda x: x[1])
for i in range(len(temp)):
if i <= len(temp) // 10: # Label the top 10% architectures as behavioral
behavioral.append(temp[i][0])
else:
non_behavioral.append(temp[i][0])
# Extract feature
_archs = []
if problem_type == "assignation":
for arch in dataset:
_archs.append(BinaryInputArchitecture(arch["id"], arch["inputs"], arch["outputs"]))
_features = dm_client.client.getDrivingFeaturesEpsilonMOEABinary(session_key, problem_id, problem_type,
behavioral, non_behavioral, _archs)
elif problem_type == "discrete":
for arch in dataset:
_archs.append(DiscreteInputArchitecture(arch["id"], arch["inputs"], arch["outputs"]))
_features = dm_client.client.getDrivingFeaturesEpsilonMOEADiscrete(session_key, problem_id, problem_type,
behavioral, non_behavioral, _archs)
else:
raise ValueError("Problem type not implemented")
features = []
for df in _features:
features.append({'id': df.id, 'name': df.name, 'expression': df.expression, 'metrics': df.metrics, 'complexity': df.complexity})
# Bias features by complexity and generality
features.sort(key=lambda f: f["metrics"][0], reverse=True) # Sort features by their support (how many archs have it)
features.sort(key=lambda f: f["complexity"]) # And then by how simple they are
advices = []
for feature in features:
if feature["expression"] == "":
continue
advices.append(feature_expression_to_string(feature["expression"], is_critique=False, context=user_info.eosscontext))
# End the connection before return statement
dm_client.endConnection()
for i in range(len(advices)):
advice = advices[i]
result.append({
"type": "Analyst",
"advice": advice
})
except Exception as e:
print("Exc in generating critic from data mining: " + str(e))
traceback.print_exc(file=sys.stdout)
dm_client.endConnection()
return result
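# Illustrative sketch (not part of the original module): active_analyst_response() above
# labels the architectures closest to the utopia point [1, 0] as "behavioral" before
# asking data mining for driving features. The helper below isolates that ranking step
# on a plain list of {"id": ..., "outputs": [science, cost]} dicts; the function name is
# hypothetical and nothing in the codebase calls it.
def _rank_by_utopia_distance_sketch(dataset, utopia_point=(1.0, 0.0)):
    """Return (behavioral_ids, non_behavioral_ids), behavioral being roughly the top 10%."""
    max_objectives = [0.0, 0.0]
    # Find the maximum value of each objective for normalization
    for design in dataset:
        for index, output in enumerate(design["outputs"]):
            max_objectives[index] = max(max_objectives[index], output)
    scored = []
    for design in dataset:
        outputs = design["outputs"]
        dist = math.sqrt(
            ((outputs[0] - utopia_point[0]) / (max_objectives[0] - utopia_point[0])) ** 2 +
            ((outputs[1] - utopia_point[1]) / (max_objectives[1] - utopia_point[1])) ** 2)
        scored.append((design["id"], dist))
    # Sort by distance and split, mirroring the i <= len(temp) // 10 cutoff used above
    scored.sort(key=lambda pair: pair[1])
    cutoff = len(scored) // 10
    behavioral = [arch_id for arch_id, _ in scored[:cutoff + 1]]
    non_behavioral = [arch_id for arch_id, _ in scored[cutoff + 1:]]
    return behavioral, non_behavioral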
def generate_analyst_message(user_info, session_key):
message = {}
features_list = active_analyst_response(user_info, session_key)[:20]
random.shuffle(features_list)
features_list = features_list[:3]
if len(features_list) == 0:
return {}
features_list = [feature["advice"] for feature in features_list]
message = {
'voice_message': 'According to the Analyst, designs on the Pareto front consistently show these features.',
'visual_message_type': ['list'],
'visual_message': [
{
'begin': 'According to the Analyst, designs on the Pareto front consistently show these features:',
'list': features_list
}
],
"writer": "daphne",
}
DialogueHistory.objects.create(user_information=user_info,
voice_message=message["voice_message"],
visual_message_type=json.dumps(message["visual_message_type"]),
visual_message=json.dumps(message["visual_message"]),
dwriter="daphne",
date=datetime.datetime.utcnow())
return message
def parse_suggestions_list(raw_list):
parsed_list = []
element_template = Template("<b>${type}</b>: ${advice}")
for element in raw_list:
parsed_list.append(element_template.substitute(element))
return parsed_list
def boolean_array_2_boolean_string(boolean_array):
leng = len(boolean_array)
bool_string = ''
for i in range(leng):
if boolean_array[i]:
bool_string += '1'
else:
bool_string += '0'
return bool_string
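# Minimal usage sketch for the two pure helpers above; the sample suggestion entries are
# invented for illustration and are not real Critic output.
if __name__ == '__main__':
    sample_suggestions = [
        {"type": "Expert", "advice": "Consider adding an instrument to the first orbit."},
        {"type": "Historian", "advice": "A similar mission flew this instrument before."},
    ]
    print(parse_suggestions_list(sample_suggestions))
    # -> ['<b>Expert</b>: Consider adding ...', '<b>Historian</b>: A similar mission ...']
    print(boolean_array_2_boolean_string([True, False, True]))  # -> '101'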
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ArticleAsset'
db.create_table('journalmanager_articleasset', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(related_name='assets', to=orm['journalmanager.Article'])),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('owner', self.gf('django.db.models.fields.CharField')(default=u'', max_length=1024)),
('use_license', self.gf('django.db.models.fields.TextField')(default=u'')),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('journalmanager', ['ArticleAsset'])
def backwards(self, orm):
# Deleting model 'ArticleAsset'
db.delete_table('journalmanager_articleasset')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleasset': {
'Meta': {'object_name': 'ArticleAsset'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assets'", 'to': "orm['journalmanager.Article']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1024'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.TextField', [], {'default': "u''"})
},
'journalmanager.articlecontrolattributes': {
'Meta': {'object_name': 'ArticleControlAttributes'},
'article': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'control_attributes'", 'unique': 'True', 'to': "orm['journalmanager.Article']"}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'ccn_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '254', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
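# Usage note (assuming a standard South setup for this project): this schema migration is
# applied with the usual management command, e.g.
#   python manage.py migrate journalmanager
# and reverted by migrating back to the previous migration number in the same app.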
|
|
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclass utils."""
import dataclasses
import enum
from typing import Any, Generic, Optional, Type, TypeVar, Union
from jax3d.utils.typing import Dataclass
_InT = TypeVar('_InT')
_OutT = TypeVar('_OutT')
_EnumT = TypeVar('_EnumT', bound=enum.Enum)
class DataclassField(Generic[_InT, _OutT]):
"""Abstract descriptor which perform data validation/conversion.
This is somewhat similar to `traitlets`, but directly compatible with
dataclasses.
Example:
```
class PositiveInteger(DataclassField[int, int]):
def _default(self):
return 123
    def _validate(self, value: int) -> int:
      if value < 0:
        raise ValueError('value should be positive')
      return value
@dataclasses.dataclass
class A:
x: int = PositiveInteger()
A(x=1) # works
  A(x=-1) # ValueError: value should be positive
assert A().x == 123 # Default factory
```
"""
def __init__(self, default: _InT = dataclasses.MISSING) -> None:
"""Constructor.
Args:
default: Default field value. If not passed, the value will be required.
"""
# Attribute name and objtype refer to the object in which the descriptor
# is applied. E.g. in the docstring example:
# * _attribute_name = 'x'
# * _objtype = A
self._attribute_name: Optional[str] = None
self._objtype: Optional[Type[Dataclass]] = None
# Whether the descriptor is a required or not. If set to True,
# calling `A()` will raise "Missing required kwargs 'x'" unless
# `_default()` is overwritten.
self._is_missing = default is dataclasses.MISSING
# Default value (when `_is_missing is False`)
    # Should `_validate` really be called at import time?
self._default_value = None if self._is_missing else self._validate(default)
# Whether `__get__` was called once. See `__get__` for details.
self._first_getattr_call: bool = True
def __set_name__(self, objtype: Type[Dataclass], name: str) -> None:
self._objtype = objtype
self._attribute_name = name
def __get__(
self,
obj: Optional[Dataclass],
objtype: Optional[Type[Dataclass]] = None,
) -> _OutT:
# Called as `MyDataclass.my_path`
if obj is None:
if not self._is_missing:
return self._default_value
# If `_default` is overwritten, we need to send `default_factory`
# to dataclass.
if self.is_default_overwritten() and self._first_getattr_call:
# Count the number of times `dataclasses.dataclass(cls)` calls
# `getattr(cls, f.name)`.
# The first time, we return a `dataclasses.Field` to let dataclass
# do the magic.
      # The second time, `dataclasses.dataclass` deletes the descriptor if
      # `isinstance(getattr(cls, f.name, None), Field)`. So it is very
      # important to return anything except a `dataclasses.Field`.
      # This relies on an implementation detail, but it seems to hold for
      # Python 3.6-3.10.
self._first_getattr_call = False
return dataclasses.field(default_factory=self._default)
else:
# If no default value is provided, `MyDataclass.my_path` should raise
# AttributeError.
raise AttributeError(
f"type object '{self._objtype.__qualname__}' has no attribute "
f"'{self._attribute_name}'"
)
# Called as `my_dataclass.my_path`
return _getattr(
obj,
self._attribute_name,
dataclasses.MISSING if self._is_missing else self._default_value
)
def __set__(self, obj: Dataclass, value: _InT) -> None:
_setattr(obj, self._attribute_name, self._validate(value))
def is_default_overwritten(self) -> bool:
return type(self)._default is not DataclassField._default
def _default(self) -> _OutT:
"""Abstract factory which returns the default value if none is provided.
Returns:
value: The default value.
"""
raise NotImplementedError('Abstract method.')
def _validate(self, value: _InT) -> _OutT:
"""Abstract method which validate or convert attribute value.
Note: Calling `_validate` twice on a value should be a no-op (so
`_validate(_validate(x)) == _validate(x)`)
Args:
value: Input value to validate/convert, as passed in `__init__` or `=`
Returns:
value: The value, eventually converted/updated.
"""
return value
def _getattr(
obj: Dataclass,
attribute_name: str,
default: Union[_OutT, type(dataclasses.MISSING)],
) -> _OutT:
"""Returns the `obj.attribute_name`."""
_init_dataclass_state(obj)
# Accessing the attribute before it was set (e.g. before super().__init__)
if (
attribute_name not in obj._dataclass_field_values # pylint: disable=protected-access
and default is dataclasses.MISSING
):
raise AttributeError(
f"type object '{type(obj).__qualname__}' has no attribute "
f"'{attribute_name}'"
)
else:
return obj._dataclass_field_values.get(attribute_name, default) # pylint: disable=protected-access
def _setattr(
obj: Dataclass,
attribute_name: str,
value: Any,
) -> None:
"""Set the `obj.attribute_name = value`."""
# Note: In `dataclasses.dataclass(frozen=True)`, obj.__setattr__ will
# correctly raise a `FrozenInstanceError` before `DataclassField.__set__` is
# called.
_init_dataclass_state(obj)
obj._dataclass_field_values[attribute_name] = value # pylint: disable=protected-access
def _init_dataclass_state(obj: Dataclass) -> None:
"""Initialize the object state containing all DataclassField values."""
if not hasattr(obj, '_dataclass_field_values'):
# Use object.__setattr__ for frozen dataclasses
object.__setattr__(obj, '_dataclass_field_values', {})
class EnumField(DataclassField[Union[str, _EnumT], _EnumT]):
"""Enum field which auto-convert `str` in value.
Example:
```python
@dataclasses.dataclass
class A:
    my_enum = j3d.utils.EnumField(MyEnum.DEFAULT)
required_enum = j3d.utils.EnumField(enum_cls=MyEnum)
optional_enum = j3d.utils.EnumField(None, enum_cls=MyEnum)
a = A(
required_enum='some_value',
)
assert a.required_enum is MyEnum.SOME_VALUE
```
"""
def __init__(
self,
default: Union[
str, None, _EnumT, type(dataclasses.MISSING)
] = dataclasses.MISSING, # pylint: disable=bad-whitespace
*,
enum_cls: Optional[Type[_EnumT]] = None,
**kwargs: Any,
):
"""Constructor.
Args:
default: Default enum value.
enum_cls: Enum class. Only required if `default` is None or missing.
**kwargs: Forwarded to `DataclassField`
"""
# Try to auto-infer enum type from the param.
if isinstance(default, enum.Enum):
if enum_cls is None:
enum_cls = type(default)
elif not isinstance(default, enum_cls):
raise ValueError(f'Conflicting enum types: {default} is not {enum_cls}')
self._enum_cls: Type[_EnumT] = enum_cls # pytype: disable=annotation-type-mismatch
self._str2enum = {x.name.lower(): x for x in self._enum_cls}
super().__init__(default, **kwargs)
def _validate(self, value: Union[str, None, _EnumT]) -> Optional[_EnumT]:
"""Validate the value."""
if isinstance(value, str):
value = value.lower() # pytype: disable=attribute-error
if value not in self._str2enum:
raise ValueError(
f'Enum should be one of {list(self._str2enum.keys())}. '
f'Not {value!r}.'
)
return self._str2enum[value]
elif isinstance(value, self._enum_cls):
return value # pytype: disable=bad-return-type
elif value is None:
return None
else:
raise TypeError(f'Invalid input {value}')
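# Self-contained sketch (not part of jax3d): exercises DataclassField exactly as its class
# docstring describes. PositiveInteger and A are hypothetical names used only here.
if __name__ == '__main__':
  class PositiveInteger(DataclassField[int, int]):
    def _default(self) -> int:
      return 123
    def _validate(self, value: int) -> int:
      if value < 0:
        raise ValueError('value should be positive')
      return value
  @dataclasses.dataclass
  class A:
    x: int = PositiveInteger()
  assert A(x=1).x == 1    # value accepted by _validate
  assert A().x == 123     # default produced by _default
  try:
    A(x=-1)               # rejected by _validate
  except ValueError:
    pass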
|
|
## client.py
# Basic IRC client implementation.
import time
import datetime
import itertools
import logging
from . import async
from . import connection
from . import protocol
__all__ = [ 'Error', 'AlreadyInChannel', 'NotInChannel', 'BasicClient' ]
PING_TIMEOUT = 720
DEFAULT_NICKNAME = '<unregistered>'
class Error(Exception):
""" Base class for all pydle errors. """
pass
class NotInChannel(Error):
def __init__(self, channel):
super().__init__('Not in channel: {}'.format(channel))
self.channel = channel
class AlreadyInChannel(Error):
def __init__(self, channel):
super().__init__('Already in channel: {}'.format(channel))
self.channel = channel
class BasicClient:
"""
Base IRC client class.
This class on its own is not complete: in order to be able to run properly, _has_message, _parse_message and _create_message have to be overloaded.
"""
RECONNECT_ON_ERROR = True
RECONNECT_MAX_ATTEMPTS = 3
RECONNECT_DELAYED = True
RECONNECT_DELAYS = [0, 5, 10, 30, 120, 600]
def __init__(self, nickname, fallback_nicknames=[], username=None, realname=None, **kwargs):
""" Create a client. """
self._nicknames = [nickname] + fallback_nicknames
self.username = username or nickname.lower()
self.realname = realname or nickname
self.eventloop = None
self._reset_connection_attributes()
self._reset_attributes()
if kwargs:
self.logger.warning('Unused arguments: %s', ', '.join(kwargs.keys()))
def _reset_attributes(self):
""" Reset attributes. """
# Record-keeping.
self.channels = {}
self.users = {}
# Low-level data stuff.
self._last_data_received = time.time()
self._receive_buffer = b''
self._pending = {}
self._handler_top_level = False
self._ping_checker_handle = None
# Misc.
self.logger = logging.getLogger(__name__)
# Public connection attributes.
self.nickname = DEFAULT_NICKNAME
self.network = None
def _reset_connection_attributes(self):
""" Reset connection attributes. """
self.connection = None
self.encoding = None
self._autojoin_channels = []
self._reconnect_attempts = 0
## Connection.
def connect(self, hostname=None, port=None, reconnect=False, eventloop=None, **kwargs):
""" Connect to IRC server. """
if (not hostname or not port) and not reconnect:
raise ValueError('Have to specify hostname and port if not reconnecting.')
# Disconnect from current connection.
if self.connected:
self.disconnect(expected=True)
# Set event loop.
if eventloop:
self.eventloop = eventloop
elif not self.eventloop:
self.eventloop = async.EventLoop()
# Reset attributes and connect.
if not reconnect:
self._reset_connection_attributes()
self._connect(hostname=hostname, port=port, reconnect=reconnect, **kwargs)
# Schedule pinger.
self._ping_checker_handle = self.eventloop.schedule_periodically(PING_TIMEOUT / 2, self._check_ping_timeout)
# Set logger name.
if self.server_tag:
self.logger = logging.getLogger(self.__class__.__name__ + ':' + self.server_tag)
def disconnect(self, expected=True):
""" Disconnect from server. """
if self.connected:
# Unschedule ping checker.
self.eventloop.unschedule(self._ping_checker_handle)
# Shutdown connection.
self.connection.off('read', self.on_data)
self.connection.off('error', self.on_data_error)
self.connection.disconnect()
# Callback.
self.on_disconnect(expected)
# Reset any attributes.
self._reset_attributes()
def _connect(self, hostname, port, reconnect=False, channels=[], encoding=protocol.DEFAULT_ENCODING, source_address=None):
""" Connect to IRC host. """
# Create connection if we can't reuse it.
if not reconnect or not self.connection:
self._autojoin_channels = channels
            self.connection = connection.Connection(hostname, port, source_address=source_address, eventloop=self.eventloop)
self.encoding = encoding
# Connect.
self.connection.connect()
# Add handlers.
self.connection.on('read', self.on_data)
self.connection.on('error', self.on_data_error)
def _reconnect_delay(self):
""" Calculate reconnection delay. """
if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED:
if self._reconnect_attempts >= len(self.RECONNECT_DELAYS):
return self.RECONNECT_DELAYS[-1]
else:
return self.RECONNECT_DELAYS[self._reconnect_attempts]
else:
return 0
def _check_ping_timeout(self):
""" Check if we have received anything from the server in a while. """
if time.time() - self._last_data_received >= PING_TIMEOUT:
error = TimeoutError('Ping timeout: no data received from server in {timeout} seconds.'.format(timeout=PING_TIMEOUT))
self.on_data_error(error)
## Internal database management.
def _create_channel(self, channel):
self.channels[channel] = {
'users': set(),
}
def _destroy_channel(self, channel):
# Copy set to prevent a runtime error when destroying the user.
for user in set(self.channels[channel]['users']):
self._destroy_user(user, channel)
del self.channels[channel]
def _create_user(self, nickname):
# Servers are NOT users.
if not nickname or '.' in nickname:
return
self.users[nickname] = {
'nickname': nickname,
'username': None,
'realname': None,
'hostname': None
}
def _sync_user(self, nick, metadata):
# Create user in database.
if nick not in self.users:
self._create_user(nick)
if nick not in self.users:
return
self.users[nick].update(metadata)
def _rename_user(self, user, new):
if user in self.users:
self.users[new] = self.users[user]
self.users[new]['nickname'] = new
del self.users[user]
else:
self._create_user(new)
if new not in self.users:
return
for ch in self.channels.values():
# Rename user in channel list.
if user in ch['users']:
ch['users'].discard(user)
ch['users'].add(new)
def _destroy_user(self, nickname, channel=None):
if channel:
channels = [ self.channels[channel] ]
else:
channels = self.channels.values()
for ch in channels:
# Remove from nicklist.
ch['users'].discard(nickname)
# If we're not in any common channels with the user anymore, we have no reliable way to keep their info up-to-date.
# Remove the user.
if not channel or not any(nickname in ch['users'] for ch in self.channels.values()):
del self.users[nickname]
def _parse_user(self, data):
""" Parse user and return nickname, metadata tuple. """
raise NotImplementedError()
def _format_user_mask(self, nickname):
user = self.users.get(nickname, { "nickname": nickname, "username": "*", "hostname": "*" })
return self._format_host_mask(user['nickname'], user['username'] or '*', user['hostname'] or '*')
def _format_host_mask(self, nick, user, host):
return '{n}!{u}@{h}'.format(n=nick, u=user, h=host)
## IRC helpers.
def is_channel(self, chan):
""" Check if given argument is a channel name or not. """
return True
def in_channel(self, channel):
""" Check if we are currently in the given channel. """
return channel in self.channels.keys()
def is_same_nick(self, left, right):
""" Check if given nicknames are equal. """
return left == right
def is_same_channel(self, left, right):
""" Check if given channel names are equal. """
return left == right
## IRC attributes.
@property
def connected(self):
""" Whether or not we are connected. """
return self.connection and self.connection.connected
@property
def server_tag(self):
if self.connected and self.connection.hostname:
if self.network:
tag = self.network.lower()
else:
tag = self.connection.hostname.lower()
# Remove hostname prefix.
if tag.startswith('irc.'):
tag = tag[4:]
# Check if host is either an FQDN or IPv4.
if '.' in tag:
# Attempt to cut off TLD.
host, suffix = tag.rsplit('.', 1)
# Make sure we aren't cutting off the last octet of an IPv4.
try:
int(suffix)
except ValueError:
tag = host
return tag
else:
return None
## IRC API.
def raw(self, message):
""" Send raw command. """
self._send(message)
def rawmsg(self, command, *args, **kwargs):
""" Send raw message. """
message = str(self._create_message(command, *args, **kwargs))
self._send(message)
## Overloadable callbacks.
def on_connect(self):
""" Callback called when the client has connected successfully. """
# Reset reconnect attempts.
self._reconnect_attempts = 0
def on_disconnect(self, expected):
if not expected:
# Unexpected disconnect. Reconnect?
if self.RECONNECT_ON_ERROR and (self.RECONNECT_MAX_ATTEMPTS is None or self._reconnect_attempts < self.RECONNECT_MAX_ATTEMPTS):
# Calculate reconnect delay.
delay = self._reconnect_delay()
self._reconnect_attempts += 1
if delay > 0:
self.logger.error('Unexpected disconnect. Attempting to reconnect within %s seconds.', delay)
else:
self.logger.error('Unexpected disconnect. Attempting to reconnect.')
# Wait and reconnect.
self.eventloop.schedule_in(delay, self.connect, reconnect=True)
else:
self.logger.error('Unexpected disconnect. Giving up.')
## Message dispatch.
def _has_message(self):
""" Whether or not we have messages available for processing. """
raise NotImplementedError()
def _create_message(self, command, *params, **kwargs):
raise NotImplementedError()
def _parse_message(self):
raise NotImplementedError()
def _send(self, input):
if not isinstance(input, (bytes, str)):
input = str(input)
if isinstance(input, str):
input = input.encode(self.encoding)
self.connection.send(input)
def handle_forever(self):
""" Handle data forever. """
self.connection.run_forever()
## Raw message handlers.
def on_data(self, data):
""" Handle received data. """
self._receive_buffer += data
self._last_data_received = time.time()
while self._has_message():
message = self._parse_message()
self.on_raw(message)
def on_data_error(self, exception):
""" Handle error. """
self.logger.error('Encountered error on socket.', exc_info=(type(exception), exception, None))
self.disconnect(expected=False)
def on_raw(self, message):
""" Handle a single message. """
self.logger.debug('<< [%s] %s %s', message.source or '', message.command, message.params)
if not message._valid:
self.logger.warning('Encountered strictly invalid IRC message from server: %s', message._raw)
if isinstance(message.command, int):
cmd = str(message.command).zfill(3)
else:
cmd = message.command
# Invoke dispatcher, if we have one.
method = 'on_raw_' + cmd.lower()
try:
# Set _top_level so __getattr__() can decide whether to return on_unknown or _ignored for unknown handlers.
# The reason for this is that features can always call super().on_raw_* safely and thus don't need to care for other features,
# while unknown messages for which no handlers exist at all are still logged.
self._handler_top_level = True
handler = getattr(self, method)
self._handler_top_level = False
self.eventloop.schedule(handler, message)
except:
self.logger.exception('Failed to execute %s handler.', method)
def on_unknown(self, message):
""" Unknown command. """
self.logger.warning('Unknown command: [%s] %s %s', message.source, message.command, message.params)
def _ignored(self, message):
""" Ignore message. """
pass
def __getattr__(self, attr):
""" Return on_unknown or _ignored for unknown handlers, depending on the invocation type. """
# Is this a raw handler?
if attr.startswith('on_raw_'):
# Are we in on_raw() trying to find any message handler?
if self._handler_top_level:
# In that case, return the method that logs and possibly acts on unknown messages.
return self.on_unknown
# Are we in an existing handler calling super()?
else:
# Just ignore it, then.
return self._ignored
# This isn't a handler, just raise an error.
raise AttributeError(attr)
class ClientPool:
""" A pool of clients that are ran and handled in parallel. """
def __init__(self, clients=None, eventloop=None):
if not eventloop:
self.eventloop = async.EventLoop()
else:
self.eventloop = eventloop
if not clients:
self.clients = set()
else:
self.clients = set(clients)
def add(self, client):
""" Add client to pool. """
self.clients.add(client)
def remove(self, client):
""" Remove client from pool. """
self.clients.remove(client)
def __contains__(self, item):
return item in self.clients
## High-level.
def connect(self, client, *args, eventloop=None, **kwargs):
"""
        Add client to pool and connect it using the given arguments.
Refer to the connect() method of the added client for details on parameters.
"""
if client not in self:
self.add(client)
client.connect(*args, eventloop=self.eventloop, **kwargs)
def disconnect(self, client, *args, **kwargs):
"""
Disconnect client from pool and remove it.
Refer to the disconnect() method of the removed client for details on parameters.
"""
if client not in self:
return
client.disconnect(*args, **kwargs)
self.remove(client)
def handle_forever(self):
""" Main loop of the pool: handle clients forever, until the event loop is stopped. """
for client in self.clients:
client.connection.setup_handlers()
self.eventloop.run()
for client in self.clients:
client.connection.remove_handlers()
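## Illustrative sketch (not part of pydle): the smallest subclass that fills in the three
## hooks BasicClient leaves abstract (_has_message, _parse_message, _create_message).
## EchoLineMessage and EchoLineClient are hypothetical names; the message object only
## carries the attributes on_raw() actually reads (_raw, _valid, source, command, params).
class EchoLineMessage:
    def __init__(self, raw):
        self._raw = raw
        self._valid = True
        self.source = None
        parts = raw.split()
        self.command = parts[0] if parts else ''
        self.params = parts[1:]
    def __str__(self):
        return self._raw
class EchoLineClient(BasicClient):
    """ Toy client that treats every CRLF-terminated line as a single message. """
    def _has_message(self):
        return b'\r\n' in self._receive_buffer
    def _parse_message(self):
        line, _, self._receive_buffer = self._receive_buffer.partition(b'\r\n')
        return EchoLineMessage(line.decode(self.encoding or 'utf-8'))
    def _create_message(self, command, *params, **kwargs):
        return EchoLineMessage(' '.join([command] + [str(param) for param in params]))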
|
|
#!/usr/bin/env python
## Bacon Game Jam 08
## Harry Beadle
## Solo Entry
#### Theme: "Millions of them."
#### Interpretation: "Google Searches"
# Requires random and urllib2
from random import shuffle
from urllib2 import urlopen
# Requires Pygame
import pygame as p
p.init()
X = 1280
Y = 720
Origin = (0, 0)
scr = p.display.set_mode((X, Y))
BLACK = 000,000,000
WHITE = 255,255,255
GREY = 215,215,215
DGRAY = 241,241,241
CKEY = 000,255,000
RED = 187,66,40
GREEN = 83,151,88
BLUE = 90,131,253
ARIAL = p.font.Font("Arial.ttf", 50)
TITLE = p.font.Font("Arial.ttf", 130)
surfaceForeground = p.Surface((X, Y))
surfaceForeground.set_colorkey(CKEY)
surfaceForeground.fill(surfaceForeground.get_colorkey())
p.draw.line(surfaceForeground, BLACK, (0, Y/2), (X, Y/2), 10)
p.draw.rect(surfaceForeground, WHITE, (555,325,180,70))
p.draw.rect(surfaceForeground, BLACK, (555,325,180,70), 5)
surfaceForeground.blit(ARIAL.render("OR", 1, BLACK), (((X-75)/2),((Y-57)/2)))
p.draw.rect(surfaceForeground, WHITE, (140,490,1000,100))
p.draw.rect(surfaceForeground, WHITE, (140,130,1000,100))
surfacePrimary = p.Surface((X, Y))
surfacePrimary.set_colorkey(CKEY)
surfacePrimary.fill(surfacePrimary.get_colorkey())
p.draw.rect(surfaceForeground, GREY, (140,490,1000,100), 3)
p.draw.rect(surfaceForeground, BLUE, (140,130,1000,100), 3)
surfaceSecondary = p.Surface((X, Y))
surfaceSecondary.set_colorkey(CKEY)
surfaceSecondary.fill(surfaceSecondary.get_colorkey())
p.draw.rect(surfaceSecondary, BLUE, (140,490,1000,100), 3)
p.draw.rect(surfaceSecondary, GREY, (140,130,1000,100), 3)
imgGoogle = p.image.load("google.bmp").convert()
imgGoogle.set_colorkey(WHITE)
imgBacon = p.image.load("bacon.png")
imgBacon.set_colorkey(WHITE)
## Top Searches of 2013 by Category
Searches_By_Category = {
    "All" : ["Paul Walker", "iPhone 5S", "Royal Baby", "Cory Monteith", "Oscar Pistorius", "Nelson Mandela", "Grand National 2013", "Universal Jobmatch", "Margaret Thatcher", "Xbox One"],
    "People" : ["Paul Walker", "Cory Monteith", "Oscar Pistorius", "Nelson Mandela", "Margaret Thatcher", "Peter Capaldi", "Nigella Lawson", "Tom Daley", "Lou Reed", "Joey Essex"],
    "Events" : ["Grand National", "Wimbledon", "Eurovision", "Confederations Cup", "The Oscars", "Red Nose Day", "Glastonbury", "Lovebox", "Brit Awards", "The Ashes"],
    "Football Stars" : ["Gareth Bale", "Willian Borges da Silva", "Mesut Ozil", "David Moyes", "Christian Benteke", "Thiago Alcantara", "Paul Gascoigne", "Gonzalo Higuain", "Adnan Januzaj", "Razor Ruddock"],
    "How To" : ["How to make pancakes", "How to write a CV", "How to lose weight", "How to draw manga", "How to play poker", "How to play guitar", "How to get a flat stomach", "How to dip dye hair", "How to reset iPod", "How to find IP address"],
    "Movies" : ["Man of Steel", "World War Z", "Iron Man 3", "The Conjuring", "Despicable Me 2", "The Impossible", "The Life of Pi", "Insidious", "Elysium", "Skyfall"],
    "Songs" : ["Harlem Shake", "Gangnam Style", "Blurred Lines", "Thrift Shop", "Wrecking Ball", "Roar", "Impossible", "Holy Grail", "Get Lucky", "Mirrors"],
    "Places" : ["Rome", "New York", "Amsterdam", "Palma", "Magaluf", "Bangkok", "Sydney", "Bruges", "Venice", "Mauritius"],
    "TV" : ["Eastenders", "Breaking Bad", "Coronation Street", "Big Brother 2013", "Strictly Come Dancing", "Emmerdale", "Hollyoaks", "Daybreak", "Top Gear", "The Voice"],
    "What Is" : ["What is twerking?", "What is my IP?", "What is yolo?", "What is a prime number?", "What is illuminati?", "What is my car worth?", "What is spooning?", "What is global warming?", "What is Zumba?", "What is the meaning of life?"]
}
Category = [
    "All",
    "People",
    "Events",
    "Football Stars",
    "How To",
    "Movies",
    "Songs",
    "Places",
    "TV",
    "What Is"
]
def getLiveList():
    """Fetch the Google hot-trends atom feed and return its <title>
    entries (excluding the feed's own "Hot Trends" title)."""
    List = []
    Atom = urlopen("http://www.google.com/trends/hottrends/atom/feed")
    for Line in Atom.read().split("\n"):
        # Crude extraction by fixed offsets: 15 characters cover the
        # leading indentation plus "<title>", 8 cover "</title>".
        if ("<title>" in Line) and ("</title>" in Line) and not ("Hot Trends" in Line):
            List.append(Line[15:-8])
    # The game shows items in pairs, so drop a trailing odd entry.
    if len(List) % 2 != 0:
        del List[-1]
    return List
def Input():
    """Poll pygame for key, mouse and quit events and return a list of
    action names; Escape or closing the window exits immediately."""
    Output = []
    for Event in p.event.get(p.KEYDOWN):
        if Event.key == p.K_UP: Output.append("Up")
        if Event.key == p.K_DOWN: Output.append("Down")
        if Event.key == p.K_LEFT: Output.append("Left")
        if Event.key == p.K_RIGHT: Output.append("Right")
        if Event.key == p.K_RETURN: Output.append("Enter")
        if Event.key == p.K_ESCAPE: p.quit();quit()
    if p.event.get(p.MOUSEBUTTONUP): Output.append("Mouse")
    if p.event.get(p.QUIT): p.quit();quit()
    return Output
def splash():
# Splash Screen
scr.fill((190,70,46))
scr.blit(
imgBacon,
(
(X-imgBacon.get_size()[0])/2,
(Y-imgBacon.get_size()[1])/2
)
)
p.display.flip()
p.time.wait(1000)
def mainMenu():
scr.fill(DGRAY)
p.draw.rect(scr, WHITE, (106,360,480,180))
p.draw.rect(scr, WHITE, (694,360,480,180))
p.draw.rect(scr, BLUE, (106,360,480,180), 3)
p.draw.rect(scr, GREY, (694,360,480,180), 3)
scr.blit(
ARIAL.render("Trending Now", 1, BLACK),
(180, 420)
)
scr.blit(
ARIAL.render("Top of 2013", 1, BLACK),
(810, 420)
)
scr.blit(
TITLE.render("Trends", 1, BLACK),
(720, 105)
)
scr.blit(imgGoogle, (150, 90))
Selection = 0
p.display.flip()
while True:
INPUT = Input()
if "Enter" in INPUT or "Mouse" in INPUT:
return Selection
if ("Right" in INPUT or p.mouse.get_pos()[0]>(X/2)) and Selection == 0:
Selection = 1
p.draw.rect(scr, GREY, (106,360,480,180), 3)
p.draw.rect(scr, BLUE, (694,360,480,180), 3)
if ("Left" in INPUT or p.mouse.get_pos()[0]<(X/2)) and Selection == 1:
Selection = 0
p.draw.rect(scr, BLUE, (106,360,480,180), 3)
p.draw.rect(scr, GREY, (694,360,480,180), 3)
p.display.flip()
def categorySelection():
    scr.fill(DGRAY)
    p.draw.rect(scr, WHITE, (140,310,1000,100))
    p.draw.rect(scr, BLUE, (140,310,1000,100), 3)
    scr.blit(
        ARIAL.render("Choose your category... (use arrow keys)", 1, BLACK),
        (140, 250)
    )
Element = 0
preElement = -1
while True:
INPUT = Input()
if "Right" in INPUT and Element != (len(Catagory)-1):
Element += 1
if "Left" in INPUT and Element != 0:
Element -= 1
if "Enter" in INPUT:
            return Searches_By_Category[Category[Element]]
if Element != preElement:
p.draw.rect(scr, WHITE, (140,310,1000,100))
p.draw.rect(scr, BLUE, (140,310,1000,100), 3)
            String = Category[Element] + " " + "(%i/%i)" % (Element + 1, len(Category))
scr.blit(
ARIAL.render(String, 1, BLACK),
(
(X-ARIAL.size(String)[0])/2,
(Y-ARIAL.size(String)[1])/2
)
)
preElement = Element
p.display.flip()
def play(String1, String2):
scr.fill(DGRAY)
scr.blit(surfacePrimary, Origin)
surfaceForegroundTemp = p.Surface((X, Y))
surfaceForegroundTemp.set_colorkey(CKEY)
surfaceForegroundTemp.fill(surfaceForegroundTemp.get_colorkey())
surfaceForegroundTemp.blit(surfaceForeground, Origin)
surfaceForegroundTemp.blit(
ARIAL.render(String1, 1, BLACK),
(
(X-ARIAL.size(String1)[0])/2,
152
)
)
surfaceForegroundTemp.blit(
ARIAL.render(String2, 1, BLACK),
(
(X-ARIAL.size(String2)[0])/2,
512
)
)
State = 0
scr.blit(surfaceForegroundTemp, Origin)
p.display.flip()
while True:
INPUT = Input()
if "Enter" in INPUT or "Mouse" in INPUT:
return State
if ("Up" in INPUT or p.mouse.get_pos()[1] < (Y/2)) and State == 1:
scr.blit(surfaceForegroundTemp, Origin)
scr.blit(surfacePrimary, Origin)
State = 0
if ("Down" in INPUT or p.mouse.get_pos()[1] > (Y/2)) and State == 0:
scr.blit(surfaceForegroundTemp, Origin)
scr.blit(surfaceSecondary, Origin)
State = 1
p.display.flip()
def finishPage(Score, List):
scr.fill(DGRAY)
Percentage = (float(Score)*100)/(float(len(List))/float(2))
Percentage = str(int(Percentage)) + "% - Press enter to continue to Main Menu."
scr.blit(
ARIAL.render(Percentage, 1, BLACK),
(
(X-ARIAL.size(Percentage)[0])/2,
(Y-ARIAL.size(Percentage)[1])/2
)
)
while True:
INPUT = Input()
if "Enter" in INPUT or "Mouse" in INPUT:
return
p.display.flip()
def main():
Selection = mainMenu()
if Selection == 0:
scr.fill(DGRAY)
scr.blit(
ARIAL.render("Loading...", 1, BLACK),
(
(X-ARIAL.size("Loading...")[0])/2,
(Y-ARIAL.size("Loading...")[1])/2
)
)
p.display.flip()
List = getLiveList()
if Selection == 1:
        List = categorySelection()
originalPosition = {}
count = 0
for e in List:
originalPosition.update({e : count})
count += 1
shuffle(List)
Score = 0
    for x in range(0, len(List), 2):
        # play() returns 0 if the player picks the top item and 1 for the
        # bottom one. Win encodes the *losing* choice (the item that ranked
        # lower in the original, pre-shuffle ordering), so the player
        # scores a point by picking the other option.
        Win = 1
        if originalPosition[List[x]] > originalPosition[List[x+1]]: Win = 0
        if play(List[x], List[x+1]) != Win:
            Score += 1
finishPage(Score, List)
if __name__ == '__main__':
splash()
while True: main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestInstances(helpers.TestCase):
INSTANCE_NAME = helpers.gen_random_resource_name('instance',
timestamp=False)
@property
def instances_page(self):
return self.home_pg.go_to_compute_instancespage()
def test_create_delete_instance(self):
"""tests the instance creation and deletion functionality:
* creates a new instance in Project > Compute > Instances page
* verifies the instance appears in the instances table as active
* deletes the newly created instance via proper page (depends on user)
* verifies the instance does not appear in the table after deletion
"""
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(self.INSTANCE_NAME)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_active(self.INSTANCE_NAME))
instances_page = self.instances_page
instances_page.delete_instance(self.INSTANCE_NAME)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_deleted(self.INSTANCE_NAME))
@decorators.skip_because(bugs=['1584057'])
def test_instances_pagination(self):
"""This test checks instance pagination
Steps:
1) Login to Horizon Dashboard as regular user
2) Navigate to user settings page
3) Change 'Items Per Page' value to 1
4) Go to Project > Compute > Instances page
5) Create 2 instances
6) Go to appropriate page (depends on user)
        7) Check that only the 'Next' link is available and only one
           instance is shown (with the correct name) on the first page
        8) Click 'Next' and check that the second page shows only one
           instance (with the correct name) and has no 'Next' link
9) Go to user settings page and restore 'Items Per Page'
10) Delete created instances via proper page (depends on user)
"""
items_per_page = 1
instance_count = 2
instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
for item in range(1, instance_count + 1)]
first_page_definition = {'Next': True, 'Prev': False,
'Count': items_per_page,
'Names': [instance_list[1]]}
second_page_definition = {'Next': False, 'Prev': False,
'Count': items_per_page,
'Names': [instance_list[0]]}
settings_page = self.home_pg.go_to_settings_usersettingspage()
settings_page.change_pagesize(items_per_page)
self.assertTrue(
settings_page.find_message_and_dismiss(messages.SUCCESS))
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(self.INSTANCE_NAME,
instance_count=instance_count)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.is_instance_active(instance_list[1]))
instances_page = self.instances_page
instances_page.instances_table.assert_definition(
first_page_definition, sorting=True)
instances_page.instances_table.turn_next_page()
instances_page.instances_table.assert_definition(
second_page_definition, sorting=True)
instances_page = self.instances_page
instances_page.instances_table.assert_definition(
first_page_definition, sorting=True)
settings_page = self.home_pg.go_to_settings_usersettingspage()
settings_page.change_pagesize()
self.assertTrue(
settings_page.find_message_and_dismiss(messages.SUCCESS))
instances_page = self.instances_page
instances_page.delete_instances(instance_list)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.are_instances_deleted(instance_list))
def test_instances_pagination_and_filtration(self):
"""This test checks instance pagination and filtration
Steps:
1) Login to Horizon Dashboard as regular user
        2) Go to the user settings page
3) Change 'Items Per Page' value to 1
4) Go to Project > Compute > Instances page
5) Create 2 instances
6) Go to appropriate page (depends on user)
        7) Filter by the Name of the first and then the second instance so
           that only one instance is shown in the list (with the correct
           name) and no 'Next' link is available
        8) Filter by the common part of the Names so that one instance is
           shown per page (with the correct name), with a 'Next' link
           available on the first page but not on the second page
9) Go to user settings page and restore 'Items Per Page'
10) Delete created instances via proper page (depends on user)
"""
items_per_page = 1
instance_count = 2
instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
for item in range(1, instance_count + 1)]
first_page_definition = {'Next': True, 'Prev': False,
'Count': items_per_page,
'Names': [instance_list[1]]}
second_page_definition = {'Next': False, 'Prev': False,
'Count': items_per_page,
'Names': [instance_list[0]]}
filter_first_page_definition = {'Next': False, 'Prev': False,
'Count': items_per_page,
'Names': [instance_list[1]]}
settings_page = self.home_pg.go_to_settings_usersettingspage()
settings_page.change_pagesize(items_per_page)
self.assertTrue(
settings_page.find_message_and_dismiss(messages.SUCCESS))
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(self.INSTANCE_NAME,
instance_count=instance_count)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.is_instance_active(instance_list[1]))
instances_page = self.instances_page
instances_page.instances_table.set_filter_value('name')
instances_page.instances_table.filter(instance_list[1])
instances_page.instances_table.assert_definition(
filter_first_page_definition, sorting=True)
instances_page.instances_table.filter(instance_list[0])
instances_page.instances_table.assert_definition(
second_page_definition, sorting=True)
instances_page.instances_table.filter(self.INSTANCE_NAME)
instances_page.instances_table.assert_definition(
first_page_definition, sorting=True)
instances_page.instances_table.filter('')
settings_page = self.home_pg.go_to_settings_usersettingspage()
settings_page.change_pagesize()
self.assertTrue(
settings_page.find_message_and_dismiss(messages.SUCCESS))
instances_page = self.instances_page
instances_page.delete_instances(instance_list)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.are_instances_deleted(instance_list))
def test_filter_instances(self):
"""This test checks filtering of instances by Instance Name
Steps:
1) Login to Horizon dashboard as regular user
2) Go to Project > Compute > Instances
3) Create 2 instances
4) Go to appropriate page (depends on user)
5) Use filter by Instance Name
        6) Check that the filtered table contains only one instance (whose
           name equals the filter value) and no other instances
        7) Check that the filtered table contains both instances when
           searching by the common part of the instance names
        8) Set a nonexistent instance name. Check that 0 rows are displayed
9) Clear filter and delete instances via proper page (depends on user)
"""
instance_count = 2
instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
for item in range(1, instance_count + 1)]
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(self.INSTANCE_NAME,
instance_count=instance_count)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.is_instance_active(instance_list[0]))
instances_page = self.instances_page
instances_page.instances_table.set_filter_value('name')
instances_page.instances_table.filter(instance_list[0])
self.assertTrue(instances_page.is_instance_present(instance_list[0]))
for instance in instance_list[1:]:
self.assertFalse(instances_page.is_instance_present(instance))
instances_page.instances_table.filter(self.INSTANCE_NAME)
for instance in instance_list:
self.assertTrue(instances_page.is_instance_present(instance))
nonexistent_instance_name = "{0}_test".format(self.INSTANCE_NAME)
instances_page.instances_table.filter(nonexistent_instance_name)
self.assertEqual(instances_page.instances_table.rows, [])
instances_page.instances_table.filter('')
instances_page.delete_instances(instance_list)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(instances_page.are_instances_deleted(instance_list))
class TestAdminInstances(helpers.AdminTestCase, TestInstances):
INSTANCE_NAME = helpers.gen_random_resource_name('instance',
timestamp=False)
@property
def instances_page(self):
return self.home_pg.go_to_system_instancespage()
@decorators.skip_because(bugs=['1584057'])
def test_instances_pagination_and_filtration(self):
super(TestAdminInstances, self).\
test_instances_pagination_and_filtration()
|
|
"""
Test Workspace client
"""
__author__ = 'dang'
## Imports
# System
import logging
import time
import unittest
# Local
from . import shared
from doekbase.data_api import core, wsfile
from doekbase.workspace import client as ws_client
_log = logging.getLogger('doekbase.tests.test_ws_client')
genome_new = "ReferenceGenomeAnnotations/kb|g.166819"
genome_old = "OriginalReferenceGenomes/kb|g.166819"
taxon_new = "ReferenceTaxons/242159_taxon"
taxon_old = "OriginalReferenceGenomes/kb|g.166819"
def generic_object():
"""Get a generic object."""
return {'type': 'Empty.AType-1.0',
'data': {'hello': 'world'}}
NOT_SUPPORTED_MSG = 'Not supported by local Workspace implementation'
class WorkspaceTests(unittest.TestCase):
# maximum allowable version of workspace for this
# test suite to be valid
MAX_WS_VERSION = (0, 999, 999)
is_local = '://' not in core.g_ws_url
def setUp(self):
if self.is_local:
self.ws = wsfile.WorkspaceFile(shared.g_ws_url)
for ref in (genome_new, genome_old, taxon_new, taxon_old):
self.ws.load(ref.replace('/', '_'))
else:
self.ws = ws_client.Workspace(
url=shared.g_ws_url, token=shared.token)
self._my_ws = None
self._delete_ws = set()
def tearDown(self):
if not self.is_local:
# delete temporary workspaces
for ws in self._delete_ws:
_log.info('deleting temporary workspace: {}'.format(ws))
self.ws.delete_workspace({'workspace': ws})
def get_workspace(self, use_existing=True):
if use_existing and self._my_ws is not None:
return self._my_ws
name = 'foo-{:.3f}'.format(time.time())
ws_obj = self.ws.create_workspace({'workspace': name})
self._delete_ws.add(name)
if use_existing:
self._my_ws = ws_obj
return name
def test_ver(self):
value = self.ws.ver()
p = value.split('.')
assert len(p) == 3, 'Bad version number: {}'.format(value)
for i in range(3):
assert int(p[i]) <= self.MAX_WS_VERSION[i], \
"Version mismatch: {ver} > {expected}".format(
ver=value, expected='.'.join(map(str, self.MAX_WS_VERSION)))
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_create_workspace(self):
self.get_workspace()
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_alter_workspace_metadata(self):
name = self.get_workspace(False)
        self.ws.alter_workspace_metadata({'wsi': {'workspace': name},
                                          'new': {'foo': 'bar'}})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_clone_workspace(self):
name = self.get_workspace()
name2 = 'bar-{:.3f}'.format(time.time())
self.ws.clone_workspace({'wsi': {'workspace': name},
'workspace': name2})
self._delete_ws.add(name2)
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_lock_workspace(self):
# this is not reversible and the workspace cannot
# be deleted (!), so running this as a test against a
# real workspace creates cruft.
# name = self.test_create_workspace()
# self.ws.lock_workspace({'workspace': name})
return
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_workspacemeta(self):
# deprecated form of get_workspace_info
return
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_workspace_info(self):
name = self.get_workspace()
self.ws.get_workspace_info({'workspace': name})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_workspace_description(self):
name = self.get_workspace()
self.ws.get_workspace_description({'workspace': name})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_set_permissions(self):
name = self.get_workspace(False)
self.ws.set_permissions({'workspace': name,
'new_permission': 'r',
'users': ['kbasetest']})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_set_global_permission(self):
name = self.get_workspace(False)
self.ws.set_global_permission({'workspace': name,
'new_permission': 'r'})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_set_workspace_description(self):
name = self.get_workspace()
self.ws.set_workspace_description(
{'workspace': name, 'description': 'quite lame'})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_permissions(self):
name = self.get_workspace()
self.ws.get_permissions({'workspace': name})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_save_objects(self):
name = self.get_workspace()
self.ws.save_objects({
'workspace': name, 'objects': [generic_object()]
})
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_administer(self):
try:
self.ws.administer({})
except ws_client.ServerError as err:
# fail if this is NOT the "normal" error
# caused by lack of admin. permissions
assert 'not an admin' in str(err)
"""
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_object_provenance(self):
assert False
def test_get_objects(self):
assert False
def test_get_object_subset(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_object_history(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_referencing_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_referencing_object_counts(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_referenced_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_workspaces(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_workspace_info(self):
assert False
def test_list_workspace_objects(self):
assert False
def test_list_objects(self):
assert False
def test_get_objectmeta(self):
assert False
def test_get_object_info(self):
assert False
def test_get_object_info_new(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_rename_workspace(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_rename_object(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_copy_object(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_revert_object(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_hide_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_unhide_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_delete_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_undelete_objects(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_delete_workspace(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_undelete_workspace(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_request_module_ownership(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_register_typespec(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_register_typespec_copy(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_release_module(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_modules(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_module_versions(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_module_info(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_jsonschema(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_translate_from_MD5_types(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_translate_to_MD5_types(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_type_info(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_all_type_info(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_func_info(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_get_all_func_info(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_grant_module_ownership(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_remove_module_ownership(self):
assert False
@unittest.skipIf(is_local, NOT_SUPPORTED_MSG)
def test_list_all_types(self):
assert False
"""
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to look up in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, whether it
    joins the pool of available hardware (Default: True)
"""
from oslo.config import cfg
from oslo.db import concurrency as db_concurrency
from oslo.db import options as db_options
db_opts = [
# TODO(rpodolyaka): this option is deprecated but still passed to
# LazyPluggable class which doesn't support retrieving
# of options put into groups. Nova's version of this
# class supports this. Perhaps, we should put it to Oslo
# and then reuse here.
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
cfg.StrOpt('backup_name_template',
default='backup-%s',
help='Template string to be used to generate backup names'), ]
CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)
CONF.set_default('sqlite_db', 'cinder.sqlite', group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}
IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)
###################
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic, disabled=None):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic, disabled=disabled)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_all_volume_sorted(context):
"""Get all volume services sorted by volume count.
:returns: a list of (Service, volume_count) tuples.
"""
return IMPL.service_get_all_volume_sorted(context)
def service_get_by_args(context, host, binary):
"""Get the state of an service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on an service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def iscsi_target_count_by_host(context, host):
"""Return count of export devices."""
return IMPL.iscsi_target_count_by_host(context, host)
def iscsi_target_create_safe(context, values):
"""Create an iscsi_target from the values dictionary.
The device is not returned. If the create violates the unique
constraints because the iscsi_target and host already exist,
no exception is raised.
"""
return IMPL.iscsi_target_create_safe(context, values)
###############
def volume_allocate_iscsi_target(context, volume_id, host):
"""Atomically allocate a free iscsi_target from the pool."""
return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, host_name,
mountpoint)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_host(context, host, count_only=False):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_host(context,
host,
count_only)
def volume_data_get_for_project(context, project_id):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id)
def finish_volume_migration(context, src_vol_id, dest_vol_id):
"""Perform database updates upon completion of volume migration."""
return IMPL.finish_volume_migration(context, src_vol_id, dest_vol_id)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context, marker, limit, sort_key, sort_dir,
filters=None):
"""Get all volumes."""
return IMPL.volume_get_all(context, marker, limit, sort_key, sort_dir,
filters=filters)
def volume_get_all_by_host(context, host):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_group(context, group_id):
"""Get all volumes belonging to a consistency group."""
return IMPL.volume_get_all_by_group(context, group_id)
def volume_get_all_by_project(context, project_id, marker, limit, sort_key,
sort_dir, filters=None):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
sort_key, sort_dir, filters=filters)
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
"""Set the given properties on an volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context)
def snapshot_get_all_by_project(context, project_id):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id)
def snapshot_get_all_for_cgsnapshot(context, project_id):
"""Get all snapshots belonging to a cgsnapshot."""
return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
"""Set the given properties on an snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
"""Get count and gigabytes used for snapshots for specified project."""
return IMPL.snapshot_data_get_for_project(context,
project_id,
volume_type_id)
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the snapshots inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.snapshot_get_active_by_window(context, begin, end, project_id)
####################
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id)
def snapshot_metadata_delete(context, snapshot_id, key):
"""Delete the given metadata item."""
return IMPL.snapshot_metadata_delete(context, snapshot_id, key)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.snapshot_metadata_update(context, snapshot_id,
metadata, delete)
####################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_metadata_delete(context, volume_id, key)
def volume_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_metadata_update(context, volume_id, metadata, delete)
##################
def volume_admin_metadata_get(context, volume_id):
"""Get all administration metadata for a volume."""
return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
delete)
##################
def volume_type_create(context, values):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values)
def volume_type_get_all(context, inactive=False):
"""Get all volume types."""
return IMPL.volume_type_get_all(context, inactive)
def volume_type_get(context, id, inactive=False):
"""Get volume type by id."""
return IMPL.volume_type_get(context, id, inactive)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Get volume types by name or id."""
return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
"""Get volume types that are associated with specific qos specs."""
return IMPL.volume_type_qos_associations_get(context,
qos_specs_id,
inactive)
def volume_type_qos_associate(context, type_id, qos_specs_id):
"""Associate a volume type with specific qos specs."""
return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate a volume type from specific qos specs."""
return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types from specific qos specs."""
return IMPL.volume_type_qos_disassociate_all(context,
qos_specs_id)
def volume_type_qos_specs_get(context, type_id):
"""Get all qos specs for given volume type."""
return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, id):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, id)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument
"""
return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
###################
def volume_type_encryption_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_get(context, volume_type_id, session)
def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_create(context, volume_type_id,
encryption_specs)
def volume_type_encryption_update(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_update(context, volume_type_id,
encryption_specs)
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_volume_get(context, volume_type_id,
session)
def volume_encryption_metadata_get(context, volume_id, session=None):
return IMPL.volume_encryption_metadata_get(context, volume_id, session)
###################
def qos_specs_create(context, values):
"""Create a qos_specs."""
return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get(context, qos_specs_id)
def qos_specs_get_all(context, inactive=False, filters=None):
"""Get all qos_specs."""
return IMPL.qos_specs_get_all(context, inactive, filters)
def qos_specs_get_by_name(context, name):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get_by_name(context, name)
def qos_specs_associations_get(context, qos_specs_id):
"""Get all associated volume types for a given qos_specs."""
return IMPL.qos_specs_associations_get(context, qos_specs_id)
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate qos_specs from volume type."""
return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate qos_specs from volume type."""
return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate qos_specs from all entities."""
return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
def qos_specs_delete(context, qos_specs_id):
"""Delete the qos_specs."""
return IMPL.qos_specs_delete(context, qos_specs_id)
def qos_specs_item_delete(context, qos_specs_id, key):
"""Delete specified key in the qos_specs."""
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, specs):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
return IMPL.qos_specs_update(context, qos_specs_id, specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
return IMPL.volume_glance_metadata_create(context,
volume_id,
key,
value)
def volume_glance_metadata_get_all(context):
"""Return the glance metadata for all volumes."""
return IMPL.volume_glance_metadata_get_all(context)
def volume_glance_metadata_get(context, volume_id):
"""Return the glance metadata for a volume."""
return IMPL.volume_glance_metadata_get(context, volume_id)
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This will copy all of the key:value pairs from the originating volume,
to ensure that a volume created from the snapshot will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
volume_id)
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot).
This will copy all of the key:value pairs from the originating snapshot,
to ensure that the Glance metadata from the original volume is retained.
"""
return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
snapshot_id)
def volume_glance_metadata_delete_by_volume(context, volume_id):
"""Delete the glance metadata for a volume."""
return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
"""Delete the glance metadata for a snapshot."""
return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume by copying all of the key:value
pairs from the originating volume.
This is so that a volume created from the volume (clone) will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
context,
src_volume_id,
volume_id)
###################
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=project_id)
def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def backup_get(context, backup_id):
"""Get a backup or raise if it does not exist."""
return IMPL.backup_get(context, backup_id)
def backup_get_all(context, filters=None):
"""Get all backups."""
return IMPL.backup_get_all(context, filters=filters)
def backup_get_all_by_host(context, host):
"""Get all backups belonging to a host."""
return IMPL.backup_get_all_by_host(context, host)
def backup_create(context, values):
"""Create a backup from the values dictionary."""
return IMPL.backup_create(context, values)
def backup_get_all_by_project(context, project_id, filters=None):
"""Get all backups belonging to a project."""
return IMPL.backup_get_all_by_project(context, project_id,
filters=filters)
def backup_update(context, backup_id, values):
"""Set the given properties on a backup and update it.
Raises NotFound if backup does not exist.
"""
return IMPL.backup_update(context, backup_id, values)
def backup_destroy(context, backup_id):
"""Destroy the backup or raise if it does not exist."""
return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
"""Get a volume transfer record or raise if it does not exist."""
return IMPL.transfer_get(context, transfer_id)
def transfer_get_all(context):
"""Get all volume transfer records."""
return IMPL.transfer_get_all(context)
def transfer_get_all_by_project(context, project_id):
"""Get all volume transfer records for specified project."""
return IMPL.transfer_get_all_by_project(context, project_id)
def transfer_create(context, values):
"""Create an entry in the transfers table."""
return IMPL.transfer_create(context, values)
def transfer_destroy(context, transfer_id):
"""Destroy a record in the volume transfer table."""
return IMPL.transfer_destroy(context, transfer_id)
def transfer_accept(context, transfer_id, user_id, project_id):
"""Accept a volume transfer."""
return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
###################
def consistencygroup_get(context, consistencygroup_id):
"""Get a consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_get(context, consistencygroup_id)
def consistencygroup_get_all(context):
"""Get all consistencygroups."""
return IMPL.consistencygroup_get_all(context)
def consistencygroup_get_all_by_host(context, host):
"""Get all consistencygroups belonging to a host."""
return IMPL.consistencygroup_get_all_by_host(context, host)
def consistencygroup_create(context, values):
"""Create a consistencygroup from the values dictionary."""
return IMPL.consistencygroup_create(context, values)
def consistencygroup_get_all_by_project(context, project_id):
"""Get all consistencygroups belonging to a project."""
return IMPL.consistencygroup_get_all_by_project(context, project_id)
def consistencygroup_update(context, consistencygroup_id, values):
"""Set the given properties on a consistencygroup and update it.
Raises NotFound if consistencygroup does not exist.
"""
return IMPL.consistencygroup_update(context, consistencygroup_id, values)
def consistencygroup_destroy(context, consistencygroup_id):
"""Destroy the consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_destroy(context, consistencygroup_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
"""Get a cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_get(context, cgsnapshot_id)
def cgsnapshot_get_all(context):
"""Get all cgsnapshots."""
return IMPL.cgsnapshot_get_all(context)
def cgsnapshot_get_all_by_host(context, host):
"""Get all cgsnapshots belonging to a host."""
return IMPL.cgsnapshot_get_all_by_host(context, host)
def cgsnapshot_create(context, values):
"""Create a cgsnapshot from the values dictionary."""
return IMPL.cgsnapshot_create(context, values)
def cgsnapshot_get_all_by_group(context, group_id):
"""Get all cgsnapshots belonging to a consistency group."""
return IMPL.cgsnapshot_get_all_by_group(context, group_id)
def cgsnapshot_get_all_by_project(context, project_id):
"""Get all cgsnapshots belonging to a project."""
return IMPL.cgsnapshot_get_all_by_project(context, project_id)
def cgsnapshot_update(context, cgsnapshot_id, values):
"""Set the given properties on a cgsnapshot and update it.
Raises NotFound if cgsnapshot does not exist.
"""
return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
def cgsnapshot_destroy(context, cgsnapshot_id):
"""Destroy the cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
|
|
# Copyright 2017 CBSD Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import OrderedDict
class Stack:
    def __init__(self):
        self.stack_items = []
    def append(self, stack_item):
        self.stack_items.append(stack_item)
        return self
    def __str__(self):
        return ''.join(str(item) for item in self.stack_items)
    __repr__ = __str__
class StackItem:
def __init__(self, reason, expected, actual):
self.reason = reason
self.expected = expected
self.actual = actual
def __repr__(self):
return 'Reason: {0}\nExpected:\n{1}\nActual:\n{2}' \
.format(self.reason, _format_value(self.expected), _format_value(self.actual))
def __str__(self):
return '\n\nReason: {0}\nExpected:\n{1}\nActual:\n{2}' \
.format(self.reason, _format_value(self.expected), _format_value(self.actual))
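# Example of the rendered diagnostics (a hedged sketch; output shown
# approximately -- values are pretty-printed JSON indented four spaces):
#
#     print str(Stack().append(StackItem('Different values', {'a': 1}, {'a': 2})))
#
#     Reason: Different values
#     Expected:
#         {
#             "a": 1
#         }
#     Actual:
#         {
#             "a": 2
#         }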
def _indent(s):
return '\n'.join(' ' + line for line in s.splitlines())
def _format_value(value):
return _indent(_generate_pprint_json(value))
def _generate_pprint_json(value):
return json.dumps(value, sort_keys=True, indent=4)
def _is_dict_same(expected, actual, ignore_value_of_keys):
    # Walk the keys of `expected` (not `actual`) so that, when the caller
    # relaxes the length check (ignore_json_length), extra keys in
    # `actual` are tolerated.
    for key in expected:
        if key not in actual:
            return False, \
                Stack().append(
                    StackItem('Expected key "{0}" Missing from Actual'
                              .format(key),
                              expected,
                              actual))
        if key not in ignore_value_of_keys:
            are_same_flag, stack = _are_same(expected[key], actual[key], ignore_value_of_keys)
            if not are_same_flag:
                return False, \
                    stack.append(StackItem('Different values', expected[key], actual[key]))
    return True, Stack()
def _is_list_same(expected, actual, ignore_value_of_keys):
for i in xrange(len(expected)):
are_same_flag, stack = _are_same(expected[i], actual[i], ignore_value_of_keys)
if not are_same_flag:
return False, \
stack.append(
StackItem('Different values (Check order)', expected[i], actual[i]))
return True, Stack()
def _bottom_up_sort(unsorted_json):
if isinstance(unsorted_json, list):
new_list = []
for i in xrange(len(unsorted_json)):
new_list.append(_bottom_up_sort(unsorted_json[i]))
return sorted(new_list)
elif isinstance(unsorted_json, dict):
new_dict = {}
for key in sorted(unsorted_json):
new_dict[key] = _bottom_up_sort(unsorted_json[key])
return new_dict
else:
return unsorted_json
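# For example (a hedged sketch), _bottom_up_sort({'a': [2, 1]}) returns
# {'a': [1, 2]}: innermost lists are sorted first, then their parents,
# so two JSON values that differ only in list ordering normalize to
# equal structures before comparison.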
def _are_same(expected, actual, ignore_value_of_keys, ignore_json_length=False):
    # Check for None
    if expected is None:
        return expected == actual, Stack()
    # Ensure they are of the same type; "$" placeholders in the expected
    # value are delegated to validate_Json_Value_Special_Sign().
    if type(expected) != type(actual):
        inRange = False
        if "$" in str(expected):
            inRange = validate_Json_Value_Special_Sign(expected, actual)
        if inRange is False:
            return False, \
                Stack().append(
                    StackItem('Type Mismatch: Expected Type: {0}, Actual Type: {1}'
                              .format(type(expected), type(actual)),
                              expected,
                              actual))
        return inRange, Stack()
    # Compare primitive types immediately
    if type(expected) in (int, str, bool, long, float, unicode):
        if "$" in str(expected):
            try:
                inRange = validate_Json_Value_Special_Sign(expected, actual)
            except Exception as e:
                return False, \
                    Stack().append(
                        StackItem(e.message,
                                  expected,
                                  actual))
            if inRange is False:
                return False, \
                    Stack().append(
                        StackItem('Not in range: the value {0} is not in the expected range {1}'
                                  .format(actual, str(expected)[str(expected).find(":")+1: len(str(expected))-1]),
                                  expected,
                                  actual))
            return inRange, Stack()
        # Quotes are stripped so quoted and unquoted forms compare equal.
        return str(expected).replace('"', "") == str(actual).replace('"', ""), Stack()
# Ensure collections have the same length (if applicable)
if ignore_json_length:
# Ensure collections has minimum length (if applicable)
# This is a short-circuit condition because (b contains a)
if len(expected) > len(actual):
return False, \
Stack().append(
StackItem('Length Mismatch: Minimum Expected Length: {0}, Actual Length: {1}'
.format(len(expected), len(actual)),
expected,
actual))
else:
# Ensure collections has same length
if len(expected) != len(actual):
return False, \
Stack().append(
StackItem('Length Mismatch: Expected Length: {0}, Actual Length: {1}'
.format(len(expected), len(actual)),
expected,
actual))
if isinstance(expected, dict):
return _is_dict_same(expected, actual, ignore_value_of_keys)
if isinstance(expected, list):
return _is_list_same(expected, actual, ignore_value_of_keys)
return False, Stack().append(StackItem('Unhandled Type: {0}'.format(type(expected)), expected, actual))
def are_same(original_a, original_b, ignore_list_order_recursively=False, ignore_value_of_keys=[]):
    if ignore_list_order_recursively:
        a = _bottom_up_sort(original_a)
        b = _bottom_up_sort(original_b)
    else:
        a = original_a
        b = original_b
    # Note: passing ignore_value_of_keys also relaxes the length check
    # (ignore_json_length), since ignored keys may be absent on one side.
    return _are_same(a, b, ignore_value_of_keys, len(ignore_value_of_keys) > 0)
def contains(expected_original, actual_original, ignore_list_order_recursively=False, ignore_value_of_keys=[]):
if ignore_list_order_recursively:
actual = _bottom_up_sort(actual_original)
expected = _bottom_up_sort(expected_original)
else:
actual = actual_original
expected = expected_original
return _are_same(expected, actual, ignore_value_of_keys, True)
def json_are_same(a, b, ignore_list_order_recursively=False, ignore_value_of_keys=[]):
return are_same(json.loads(a), json.loads(b), ignore_list_order_recursively, ignore_value_of_keys)
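# A small, hedged usage sketch of the public helpers above (all values
# are made up for illustration):
#
#     ok, stack = are_same({'a': 1, 'b': [1, 2]}, {'b': [1, 2], 'a': 1})
#     # ok is truthy; dict key order never matters.
#     ok, stack = contains({'a': 1}, {'a': 1, 'b': 2})
#     # ok is truthy; contains() only requires the expected subset.
#     ok, stack = json_are_same('{"a": 1}', '{"a": 2}')
#     # ok is False; str(stack) describes the mismatch.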
def validate_Json_Value_Special_Sign(expected, actual):
strExpected = str(expected)
strActual = str(actual)
if "range" in strExpected:
indexOfPunctuation = strExpected.find(":")
indexOfSeparationSign = strExpected.find("To")
lowestVal = strExpected[indexOfPunctuation+1:indexOfSeparationSign]
highestVal = strExpected[indexOfSeparationSign+2:len(strExpected)-1].replace("}", "")
if float(lowestVal) <= float(strActual) and float(highestVal) >= float(strActual):
return True
raise Exception("ERROR - the actual value : " + strActual + " is not in the expected range : " + lowestVal + " To : " + highestVal)
if "or" in strExpected:
try:
# expected is a dict whose values are lists of allowed options
strActualOnlyLetters = strActual.replace("[", "").replace("u'", "").replace("'", "").replace("]", "")
for item in expected.iteritems():
for option in item[1]:
strExpectedOnlyLetters = str(option).lower().replace("[", "").replace("u'", "").replace("'", "").replace("]", "")
if len(strExpectedOnlyLetters.split(",")) > 1:
valuesOfExpected = strExpectedOnlyLetters.split(",")
valuesOfActual = strActualOnlyLetters.split(",")
for cell in valuesOfActual:
if cell in valuesOfExpected:
return option
if strExpectedOnlyLetters.upper() == strActualOnlyLetters.upper():
return option
except Exception:
# expected is a plain string token; strip everything before ':' and parse the options
indexOfPunctuation = strExpected.find(":")
strExpected = strExpected[indexOfPunctuation+1:]
if "[[" in strExpected:
optionsArray = strExpected[1:-1].split("],[")
for i, key in enumerate(optionsArray):
optionsArray[i] = key.replace("[", "").replace("]", "")
else:
optionsArray = strExpected.replace("'", "").replace("[", "").replace("]", "").split(",")
for value in optionsArray:
strExpectedOnlyLetters = str(value).replace("[", "").replace("u'", "").replace("'", "").replace('"', "").replace("]", "")
strActualOnlyLetters = strActual.replace("[", "").replace("u'", "").replace("'", "").replace("]", "")
if len(value.split(",")) > 1:
valuesOfExpected = value.replace('"', "").split(",")
valuesOfActual = strActualOnlyLetters.split(",")
for cell in valuesOfActual:
if str(cell).strip() not in valuesOfExpected:
return False
return value
if strExpectedOnlyLetters == strActual:
return value
raise Exception("ERROR - the actual value : " + strActual + " is not one of the valid options in the json file")
if "maximumLength" in strExpected:
indexOfPunctuation = strExpected.find(":")
lenExpected = strExpected[indexOfPunctuation+1:len(strExpected)-1]
if len(strActual) <= int(lenExpected):
return strActual
raise Exception("ERROR - the actual value : " + strActual + " is longer than the maximum length : " + lenExpected)
from collections import OrderedDict
def Get_Json_After_Parse_To_Dic(jsonFileName, confFile, dirPath):
'''
Given a json file name, the parsed config file and the directory path,
load the json file into an OrderedDict, preserving key order.
'''
filePath = os.path.normpath(os.path.join(str(dirPath), confFile.getElementsByTagName("jsonsRepoPath")[0].firstChild.data))
if("Optional" in str(jsonFileName)):
filePath = os.path.join(filePath, "OptionalParams")
myfile = open(os.path.join(filePath,str(jsonFileName)))
jsonAfterParse = json.load(myfile, object_pairs_hook=OrderedDict)
return jsonAfterParse
def get_Node_Of_Json_Parsed(jsonFileName,nodeOfJsonRequest,confFile,dirPath):
jsonAfterParse = Get_Json_After_Parse_To_Dic(jsonFileName, confFile, dirPath)
if(Is_Json_contains_key(jsonFileName, nodeOfJsonRequest, confFile, dirPath,jsonAfterParse)):
return jsonAfterParse[nodeOfJsonRequest]
else:
raise IOError("ERROR - the node : " + str(nodeOfJsonRequest) +" not exists in the expected json file :" + str(jsonFileName))
def Is_Json_contains_key(jsonFileName,nodeOfJsonRequest,confFile,dirPath,jsonAfterParse=None):
if jsonAfterParse is None:
jsonAfterParse = Get_Json_After_Parse_To_Dic(jsonFileName, confFile, dirPath)
return nodeOfJsonRequest in jsonAfterParse
def ordered_dict_prepend(dct, key, value, dict_setitem=dict.__setitem__):
root = dct._OrderedDict__root
first = root[1]
if key in dct:
link = dct._OrderedDict__map[key]
link_prev, link_next, _ = link
link_prev[1] = link_next
link_next[0] = link_prev
link[0] = root
link[1] = first
root[1] = first[0] = link
else:
root[1] = first[0] = dct._OrderedDict__map[key] = [root, first, key]
return dict_setitem(dct, key, value)
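# Hedged usage sketch: ordered_dict_prepend pokes at CPython's private
# OrderedDict link fields (_OrderedDict__root / _OrderedDict__map), so it
# only works on interpreters that expose them (e.g. CPython 2.7).
def _example_ordered_dict_prepend():
    d = OrderedDict([("b", 2), ("c", 3)])
    ordered_dict_prepend(d, "a", 1)
    return list(d)  # ["a", "b", "c"]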
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: __init__.py 2270 2008-09-21 08:01:58Z Alex.Holkner $
import ctypes
import sys
import time
from pyglet.media import AudioPlayer, Listener, MediaException
from pyglet.media.drivers.openal import lib_openal as al
from pyglet.media.drivers.openal import lib_alc as alc
class OpenALException(MediaException):
pass
def _split_nul_strings(s):
# NUL-separated list of strings, double-NUL-terminated.
nul = False
i = 0
while True:
if s[i] == '\0':
if nul:
break
else:
nul = True
else:
nul = False
i += 1
s = s[:i - 1]
return s.split('\0')
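# Hedged sketch of the expected input: a NUL-separated, double-NUL-terminated
# string, as alcGetString returns for list queries.
def _example_split_nul_strings():
    return _split_nul_strings('first\0second\0\0')  # ['first', 'second']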
def get_version():
major = alc.ALCint()
minor = alc.ALCint()
alc.alcGetIntegerv(_device, alc.ALC_MAJOR_VERSION,
ctypes.sizeof(major), major)
alc.alcGetIntegerv(_device, alc.ALC_MINOR_VERSION,
ctypes.sizeof(minor), minor)
return major.value, minor.value
def have_version(major, minor):
return (major, minor) <= get_version()
def get_extensions():
extensions = alc.alcGetString(_device, alc.ALC_EXTENSIONS)
# Check for null pointer
if not ctypes.cast(extensions, ctypes.c_void_p).value:
return []
if sys.platform == 'darwin':
return ctypes.cast(extensions, ctypes.c_char_p).value.split(' ')
else:
return _split_nul_strings(extensions)
def have_extension(extension):
return extension in get_extensions()
format_map = {
(1, 8): al.AL_FORMAT_MONO8,
(1, 16): al.AL_FORMAT_MONO16,
(2, 8): al.AL_FORMAT_STEREO8,
(2, 16): al.AL_FORMAT_STEREO16,
}
class OpenALAudioPlayer(AudioPlayer):
#: Seconds ahead to buffer audio. Keep small for low latency, but large
#: enough to avoid underruns. (0.05 is the minimum for my 2.2 GHz Linux)
_update_buffer_time = 0.2
#: Minimum size of an OpenAL buffer worth bothering with
_min_buffer_size = 512
#: Maximum size of an OpenAL buffer, in bytes. TODO: use OpenAL maximum
_max_buffer_size = 65536
UPDATE_PERIOD = 0.05
def __init__(self, audio_format):
super(OpenALAudioPlayer, self).__init__(audio_format)
try:
self._al_format = format_map[(audio_format.channels,
audio_format.sample_size)]
except KeyError:
raise OpenALException('Unsupported audio format.')
self._al_source = al.ALuint()
al.alGenSources(1, self._al_source)
# Seconds of audio currently queued but not yet processed (estimate)
self._buffered_time = 0.0
# Seconds of audio into current (head) buffer
self._current_buffer_time = 0.0
# List of (timestamp, duration) corresponding to currently queued AL
# buffers
self._timestamps = []
# OpenAL 1.0 timestamp interpolation
self._timestamp_system_time = 0.0
# Desired play state (True even if stopped due to underrun)
self._playing = False
# Timestamp when paused
self._pause_timestamp = 0.0
self._eos_count = 0
def __del__(self):
try:
al.alDeleteSources(1, self._al_source)
except:
pass
def get_write_size(self):
t = self._buffered_time - self._current_buffer_time
size = int(max(0, self._update_buffer_time - t) * \
self.audio_format.bytes_per_second)
if size < self._min_buffer_size:
size = 0
return size
def write(self, audio_data):
buffer = al.ALuint()
al.alGenBuffers(1, buffer)
al.alBufferData(buffer,
self._al_format,
audio_data.data,
audio_data.length,
self.audio_format.sample_rate)
al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buffer))
self._buffered_time += audio_data.duration
self._timestamps.append((audio_data.timestamp, audio_data.duration))
audio_data.consume(audio_data.length, self.audio_format)
def write_eos(self):
if self._timestamps:
self._timestamps.append((None, None))
def write_end(self):
pass
def play(self):
if self._playing:
return
self._playing = True
self._al_play()
if not _have_1_1:
self._timestamp_system_time = time.time()
def _al_play(self):
if not self._timestamps:
return
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
al.alSourcePlay(self._al_source)
def stop(self):
if not self._playing:
return
self._pause_timestamp = self.get_time()
al.alSourcePause(self._al_source)
self._playing = False
def clear(self):
al.alSourceStop(self._al_source)
self._playing = False
processed = al.ALint()
al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
if processed.value:
buffers = (al.ALuint * processed.value)()
al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
al.alDeleteBuffers(len(buffers), buffers)
self._pause_timestamp = 0.0
self._buffered_time = 0.0
self._current_buffer_time = 0.0
self._timestamps = []
def pump(self):
# Release spent buffers
processed = al.ALint()
al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
processed = processed.value
if processed:
buffers = (al.ALuint * processed)()
al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
al.alDeleteBuffers(len(buffers), buffers)
# Pop timestamps and check for eos markers
try:
while processed:
if not _have_1_1:
self._timestamp_system_time = time.time()
_, duration = self._timestamps.pop(0)
self._buffered_time -= duration
while self._timestamps[0][0] is None:
self._eos_count += 1
self._timestamps.pop(0)
processed -= 1
except IndexError:
pass
if _have_1_1:
samples = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SAMPLE_OFFSET, samples)
self._current_buffer_time = samples.value / \
float(self.audio_format.sample_rate)
else:
# Interpolate system time past buffer timestamp
self._current_buffer_time = time.time() - \
self._timestamp_system_time
# Check for underrun
if self._playing:
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
al.alSourcePlay(self._al_source)
return True # underrun notification
def get_time(self):
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if not self._playing:
return self._pause_timestamp
if not self._timestamps:
return self._pause_timestamp
ts, _ = self._timestamps[0]
return ts + self._current_buffer_time
def clear_eos(self):
while self._eos_count > 0:
self._eos_count -= 1
return True
return False
def set_volume(self, volume):
al.alSourcef(self._al_source, al.AL_GAIN, max(0, volume))
def set_position(self, position):
x, y, z = position
al.alSource3f(self._al_source, al.AL_POSITION, x, y, z)
def set_min_distance(self, min_distance):
al.alSourcef(self._al_source, al.AL_REFERENCE_DISTANCE, min_distance)
def set_max_distance(self, max_distance):
al.alSourcef(self._al_source, al.AL_MAX_DISTANCE, max_distance)
def set_pitch(self, pitch):
al.alSourcef(self._al_source, al.AL_PITCH, max(0, pitch))
def set_cone_orientation(self, cone_orientation):
x, y, z = cone_orientation
al.alSource3f(self._al_source, al.AL_DIRECTION, x, y, z)
def set_cone_inner_angle(self, cone_inner_angle):
al.alSourcef(self._al_source, al.AL_CONE_INNER_ANGLE, cone_inner_angle)
def set_cone_outer_angle(self, cone_outer_angle):
al.alSourcef(self._al_source, al.AL_CONE_OUTER_ANGLE, cone_outer_angle)
def set_cone_outer_gain(self, cone_outer_gain):
al.alSourcef(self._al_source, al.AL_CONE_OUTER_GAIN, cone_outer_gain)
class OpenALListener(Listener):
def _set_volume(self, volume):
al.alListenerf(al.AL_GAIN, volume)
self._volume = volume
def _set_position(self, position):
x, y, z = position
al.alListener3f(al.AL_POSITION, x, y, z)
self._position = position
def _set_forward_orientation(self, orientation):
val = (al.ALfloat * 6)(*(orientation + self._up_orientation))
al.alListenerfv(al.AL_ORIENTATION, val)
self._forward_orientation = orientation
def _set_up_orientation(self, orientation):
val = (al.ALfloat * 6)(*(self._forward_orientation + orientation))
al.alListenerfv(al.AL_ORIENTATION, val)
self._up_orientation = orientation
_device = None
_have_1_1 = False
def driver_init(device_name=None):
global _device
global _have_1_1
# TODO devices must be enumerated on Windows, otherwise 1.0 context is
# returned.
_device = alc.alcOpenDevice(device_name)
if not _device:
raise OpenALException('No OpenAL device.')
alcontext = alc.alcCreateContext(_device, None)
alc.alcMakeContextCurrent(alcontext)
if have_version(1, 1):
# Good version info to cache
_have_1_1 = True
# See issue #163.
import sys
if sys.platform in ('win32', 'cygwin'):
from pyglet import clock
clock.Clock._force_sleep = True
driver_listener = OpenALListener()
driver_audio_player_class = OpenALAudioPlayer
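# Hedged usage sketch: pyglet.media normally initialises the driver once on
# import; done by hand it looks roughly like this.
def _example_driver_init():
    driver_init()          # opens the default OpenAL device
    return get_version()   # e.g. (1, 1) when an OpenAL 1.1 context is available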
|
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Restricted open-shell Hartree-Fock for periodic systems with k-point sampling
'''
from functools import reduce
import numpy as np
import scipy.linalg
from pyscf.scf import hf as mol_hf
from pyscf.pbc.scf import khf
from pyscf.pbc.scf import kuhf
from pyscf.pbc.scf import rohf as pbcrohf
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.scf import addons
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'pbc_scf_analyze_with_meta_lowdin', True)
PRE_ORTH_METHOD = getattr(__config__, 'pbc_scf_analyze_pre_orth_method', 'ANO')
def make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs):
'''Alpha and beta spin one particle density matrices for all k-points.
Returns:
dm_kpts : (2, nkpts, nao, nao) ndarray
'''
dma = []
dmb = []
for k, occ in enumerate(mo_occ_kpts):
mo_a = mo_coeff_kpts[k][:,occ> 0]
mo_b = mo_coeff_kpts[k][:,occ==2]
dma.append(np.dot(mo_a, mo_a.conj().T))
dmb.append(np.dot(mo_b, mo_b.conj().T))
return lib.asarray((dma,dmb))
def get_fock(mf, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1, diis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
h1e_kpts, s_kpts, vhf_kpts, dm_kpts = h1e, s1e, vhf, dm
if h1e_kpts is None: h1e_kpts = mf.get_hcore()
if s_kpts is None: s_kpts = mf.get_ovlp()
if dm_kpts is None: dm_kpts = mf.make_rdm1()
if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)
focka = h1e_kpts + vhf_kpts[0]
fockb = h1e_kpts + vhf_kpts[1]
f_kpts = get_roothaan_fock((focka, fockb), dm_kpts, s_kpts)
if cycle < 0 and diis is None: # Not inside the SCF iteration
return f_kpts
if diis_start_cycle is None:
diis_start_cycle = mf.diis_start_cycle
if level_shift_factor is None:
level_shift_factor = mf.level_shift
if damp_factor is None:
damp_factor = mf.damp
dm_sf = dm_kpts[0] + dm_kpts[1]
if 0 <= cycle < diis_start_cycle-1 and abs(damp_factor) > 1e-4:
raise NotImplementedError('ROHF Fock-damping')
if diis and cycle >= diis_start_cycle:
f_kpts = diis.update(s_kpts, dm_sf, f_kpts, mf, h1e_kpts, vhf_kpts)
if abs(level_shift_factor) > 1e-4:
f_kpts = [mol_hf.level_shift(s, dm_sf[k]*.5, f_kpts[k], level_shift_factor)
for k, s in enumerate(s_kpts)]
f_kpts = lib.tag_array(lib.asarray(f_kpts), focka=focka, fockb=fockb)
return f_kpts
def get_roothaan_fock(focka_fockb, dma_dmb, s):
'''Roothaan's effective fock.
======== ======== ====== =========
space closed open virtual
======== ======== ====== =========
closed Fc Fb Fc
open Fb Fc Fa
virtual Fc Fa Fc
======== ======== ====== =========
where Fc = (Fa + Fb) / 2
Returns:
Roothaan effective Fock matrix
'''
nkpts = len(s)
nao = s[0].shape[0]
focka, fockb = focka_fockb
dma, dmb = dma_dmb
fock_kpts = []
for k in range(nkpts):
fc = (focka[k] + fockb[k]) * .5
pc = np.dot(dmb[k], s[k])
po = np.dot(dma[k]-dmb[k], s[k])
pv = np.eye(nao) - np.dot(dma[k], s[k])
fock = reduce(np.dot, (pc.conj().T, fc, pc)) * .5
fock += reduce(np.dot, (po.conj().T, fc, po)) * .5
fock += reduce(np.dot, (pv.conj().T, fc, pv)) * .5
fock += reduce(np.dot, (po.conj().T, fockb[k], pc))
fock += reduce(np.dot, (po.conj().T, focka[k], pv))
fock += reduce(np.dot, (pv.conj().T, fc, pc))
fock_kpts.append(fock + fock.conj().T)
fock_kpts = lib.tag_array(np.asarray(fock_kpts), focka=focka, fockb=fockb)
return fock_kpts
def get_occ(mf, mo_energy_kpts=None, mo_coeff_kpts=None):
'''Label the occupancies for each orbital for sampled k-points.
This is a k-point version of scf.hf.SCF.get_occ
'''
if mo_energy_kpts is None: mo_energy_kpts = mf.mo_energy
if getattr(mo_energy_kpts[0], 'mo_ea', None) is not None:
mo_ea_kpts = [x.mo_ea for x in mo_energy_kpts]
mo_eb_kpts = [x.mo_eb for x in mo_energy_kpts]
else:
mo_ea_kpts = mo_eb_kpts = mo_energy_kpts
nocc_a, nocc_b = mf.nelec
mo_energy_kpts1 = np.hstack(mo_energy_kpts)
mo_energy = np.sort(mo_energy_kpts1)
if nocc_b > 0:
core_level = mo_energy[nocc_b-1]
else:
core_level = -1e9
if nocc_a == nocc_b:
fermi = core_level
else:
mo_ea_kpts1 = np.hstack(mo_ea_kpts)
mo_ea = np.sort(mo_ea_kpts1[mo_energy_kpts1 > core_level])
fermi = mo_ea[nocc_a - nocc_b - 1]
mo_occ_kpts = []
for k, mo_e in enumerate(mo_energy_kpts):
occ = np.zeros_like(mo_e)
occ[mo_e <= core_level] = 2
if nocc_a != nocc_b:
occ[(mo_e > core_level) & (mo_ea_kpts[k] <= fermi)] = 1
mo_occ_kpts.append(occ)
if nocc_a < len(mo_energy):
logger.info(mf, 'HOMO = %.12g LUMO = %.12g',
mo_energy[nocc_a-1], mo_energy[nocc_a])
else:
logger.info(mf, 'HOMO = %.12g', mo_energy[nocc_a-1])
np.set_printoptions(threshold=len(mo_energy))
if mf.verbose >= logger.DEBUG:
logger.debug(mf, ' Roothaan | alpha | beta')
for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
core_idx = mo_occ_kpts[k] == 2
open_idx = mo_occ_kpts[k] == 1
vir_idx = mo_occ_kpts[k] == 0
logger.debug(mf, ' kpt %2d (%6.3f %6.3f %6.3f)',
k, kpt[0], kpt[1], kpt[2])
if np.count_nonzero(core_idx) > 0:
logger.debug(mf, ' Highest 2-occ = %18.15g | %18.15g | %18.15g',
max(mo_energy_kpts[k][core_idx]),
max(mo_ea_kpts[k][core_idx]), max(mo_eb_kpts[k][core_idx]))
if np.count_nonzero(vir_idx) > 0:
logger.debug(mf, ' Lowest 0-occ = %18.15g | %18.15g | %18.15g',
min(mo_energy_kpts[k][vir_idx]),
min(mo_ea_kpts[k][vir_idx]), min(mo_eb_kpts[k][vir_idx]))
for i in np.where(open_idx)[0]:
logger.debug(mf, ' 1-occ = %18.15g | %18.15g | %18.15g',
mo_energy_kpts[k][i], mo_ea_kpts[k][i], mo_eb_kpts[k][i])
logger.debug(mf, ' k-point Roothaan mo_energy')
for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
logger.debug(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
k, kpt[0], kpt[1], kpt[2],
mo_energy_kpts[k][mo_occ_kpts[k]> 0],
mo_energy_kpts[k][mo_occ_kpts[k]==0])
if mf.verbose >= logger.DEBUG1:
logger.debug1(mf, ' k-point alpha mo_energy')
for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
logger.debug1(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
k, kpt[0], kpt[1], kpt[2],
mo_ea_kpts[k][mo_occ_kpts[k]> 0],
mo_ea_kpts[k][mo_occ_kpts[k]==0])
logger.debug1(mf, ' k-point beta mo_energy')
for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
logger.debug1(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
k, kpt[0], kpt[1], kpt[2],
mo_eb_kpts[k][mo_occ_kpts[k]==2],
mo_eb_kpts[k][mo_occ_kpts[k]!=2])
np.set_printoptions(threshold=1000)
return mo_occ_kpts
energy_elec = kuhf.energy_elec
dip_moment = kuhf.dip_moment
get_rho = kuhf.get_rho
@lib.with_doc(khf.mulliken_meta.__doc__)
def mulliken_meta(cell, dm_ao_kpts, verbose=logger.DEBUG,
pre_orth_method=PRE_ORTH_METHOD, s=None):
'''Mulliken population analysis, based on meta-Lowdin AOs.
Note this function only computes the Mulliken population for the gamma
point density matrix.
'''
dm = dm_ao_kpts[0] + dm_ao_kpts[1]
return khf.mulliken_meta(cell, dm, verbose, pre_orth_method, s)
def canonicalize(mf, mo_coeff_kpts, mo_occ_kpts, fock=None):
'''Canonicalization diagonalizes the ROHF Fock matrix within the closed,
open, and virtual subspaces separately (without changing the occupancies).
'''
if fock is None:
dm = mf.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
fock = mf.get_fock(dm=dm)
mo_coeff = []
mo_energy = []
for k, mo in enumerate(mo_coeff_kpts):
mo1 = np.empty_like(mo)
mo_e = np.empty_like(mo_occ_kpts[k])
coreidx = mo_occ_kpts[k] == 2
openidx = mo_occ_kpts[k] == 1
viridx = mo_occ_kpts[k] == 0
for idx in (coreidx, openidx, viridx):
if np.count_nonzero(idx) > 0:
orb = mo[:,idx]
f1 = reduce(np.dot, (orb.T.conj(), fock[k], orb))
e, c = scipy.linalg.eigh(f1)
mo1[:,idx] = np.dot(orb, c)
mo_e[idx] = e
if getattr(fock, 'focka', None) is not None:
fa, fb = fock.focka[k], fock.fockb[k]
mo_ea = np.einsum('pi,pi->i', mo1.conj(), fa.dot(mo1)).real
mo_eb = np.einsum('pi,pi->i', mo1.conj(), fb.dot(mo1)).real
mo_e = lib.tag_array(mo_e, mo_ea=mo_ea, mo_eb=mo_eb)
mo_coeff.append(mo1)
mo_energy.append(mo_e)
return mo_energy, mo_coeff
init_guess_by_chkfile = kuhf.init_guess_by_chkfile
class KROHF(khf.KRHF, pbcrohf.ROHF):
'''ROHF class with k-point sampling.
'''
conv_tol = getattr(__config__, 'pbc_scf_KSCF_conv_tol', 1e-7)
conv_tol_grad = getattr(__config__, 'pbc_scf_KSCF_conv_tol_grad', None)
direct_scf = getattr(__config__, 'pbc_scf_SCF_direct_scf', True)
def __init__(self, cell, kpts=np.zeros((1,3)),
exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
khf.KSCF.__init__(self, cell, kpts, exxdiv)
self.nelec = None
@property
def nelec(self):
if self._nelec is not None:
return self._nelec
else:
cell = self.cell
nkpts = len(self.kpts)
ne = cell.tot_electrons(nkpts)
nalpha = (ne + cell.spin) // 2
nbeta = nalpha - cell.spin
if nalpha + nbeta != ne:
raise RuntimeError('Electron number %d and spin %d are not consistent\n'
'Note cell.spin = 2S = Nalpha - Nbeta, not 2S+1' %
(ne, cell.spin))
return nalpha, nbeta
@nelec.setter
def nelec(self, x):
self._nelec = x
def dump_flags(self, verbose=None):
khf.KSCF.dump_flags(self, verbose)
logger.info(self, 'number of electrons per unit cell '
'alpha = %d beta = %d', *self.nelec)
return self
#? def get_init_guess(self, cell=None, key='minao'):
#? dm_kpts = khf.KSCF.get_init_guess(self, cell, key)
#? if dm_kpts.ndim != 4: # The KRHF initial guess
#? # dm_kpts shape should be (spin, nkpts, nao, nao)
#? dm_kpts = lib.asarray([dm_kpts*.5,]*2)
#? return dm_kpts
#?
def get_init_guess(self, cell=None, key='minao'):
if cell is None:
cell = self.cell
dm_kpts = None
key = key.lower()
if key == '1e' or key == 'hcore':
dm_kpts = self.init_guess_by_1e(cell)
elif getattr(cell, 'natm', 0) == 0:
logger.info(self, 'No atom found in cell. Use 1e initial guess')
dm_kpts = self.init_guess_by_1e(cell)
elif key == 'atom':
dm = self.init_guess_by_atom(cell)
elif key[:3] == 'chk':
try:
dm_kpts = self.from_chk()
except (IOError, KeyError):
logger.warn(self, 'Fail to read %s. Use MINAO initial guess',
self.chkfile)
dm = self.init_guess_by_minao(cell)
else:
dm = self.init_guess_by_minao(cell)
if dm_kpts is None:
nkpts = len(self.kpts)
# dm[spin,nao,nao] at gamma point -> dm_kpts[spin,nkpts,nao,nao]
dm_kpts = np.repeat(dm[:,None,:,:], nkpts, axis=1)
ne = np.einsum('xkij,kji->', dm_kpts, self.get_ovlp(cell)).real
# FIXME: consider the fractional num_electron or not? This maybe
# relates to the charged system.
nkpts = len(self.kpts)
nelec = float(sum(self.nelec))
if np.any(abs(ne - nelec) > 1e-7*nkpts):
logger.debug(self, 'Big error detected in the electron number '
'of initial guess density matrix (Ne/cell = %g)!\n'
' This can cause huge error in Fock matrix and '
'lead to instability in SCF for low-dimensional '
'systems.\n DM is normalized wrt the number '
'of electrons %g', ne/nkpts, nelec/nkpts)
dm_kpts *= nelec / ne
return dm_kpts
init_guess_by_minao = pbcrohf.ROHF.init_guess_by_minao
init_guess_by_atom = pbcrohf.ROHF.init_guess_by_atom
init_guess_by_huckel = pbcrohf.ROHF.init_guess_by_huckel
get_rho = get_rho
get_fock = get_fock
get_occ = get_occ
energy_elec = energy_elec
def get_veff(self, cell=None, dm_kpts=None, dm_last=0, vhf_last=0, hermi=1,
kpts=None, kpts_band=None):
if dm_kpts is None:
dm_kpts = self.make_rdm1()
if getattr(dm_kpts, 'mo_coeff', None) is not None:
mo_coeff = dm_kpts.mo_coeff
mo_occ_a = [(x > 0).astype(np.double) for x in dm_kpts.mo_occ]
mo_occ_b = [(x ==2).astype(np.double) for x in dm_kpts.mo_occ]
dm_kpts = lib.tag_array(dm_kpts, mo_coeff=(mo_coeff,mo_coeff),
mo_occ=(mo_occ_a,mo_occ_b))
if self.rsjk and self.direct_scf:
ddm = dm_kpts - dm_last
vj, vk = self.get_jk(cell, ddm, hermi, kpts, kpts_band)
vhf = vj[0] + vj[1] - vk
vhf += vhf_last
else:
vj, vk = self.get_jk(cell, dm_kpts, hermi, kpts, kpts_band)
vhf = vj[0] + vj[1] - vk
return vhf
def get_grad(self, mo_coeff_kpts, mo_occ_kpts, fock=None):
if fock is None:
dm1 = self.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
fock = self.get_hcore(self.cell, self.kpts) + self.get_veff(self.cell, dm1)
if getattr(fock, 'focka', None) is not None:
focka = fock.focka
fockb = fock.fockb
elif getattr(fock, 'ndim', None) == 4:
focka, fockb = fock
else:
focka = fockb = fock
def grad(k):
mo_occ = mo_occ_kpts[k]
mo_coeff = mo_coeff_kpts[k]
return pbcrohf.get_grad(mo_coeff, mo_occ, (focka[k], fockb[k]))
nkpts = len(self.kpts)
grad_kpts = np.hstack([grad(k) for k in range(nkpts)])
return grad_kpts
def eig(self, fock, s):
e, c = khf.KSCF.eig(self, fock, s)
if getattr(fock, 'focka', None) is not None:
for k, mo in enumerate(c):
fa, fb = fock.focka[k], fock.fockb[k]
mo_ea = np.einsum('pi,pi->i', mo.conj(), fa.dot(mo)).real
mo_eb = np.einsum('pi,pi->i', mo.conj(), fb.dot(mo)).real
e[k] = lib.tag_array(e[k], mo_ea=mo_ea, mo_eb=mo_eb)
return e, c
def make_rdm1(self, mo_coeff_kpts=None, mo_occ_kpts=None, **kwargs):
if mo_coeff_kpts is None: mo_coeff_kpts = self.mo_coeff
if mo_occ_kpts is None: mo_occ_kpts = self.mo_occ
return make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs)
def init_guess_by_chkfile(self, chk=None, project=True, kpts=None):
if chk is None: chk = self.chkfile
if kpts is None: kpts = self.kpts
return init_guess_by_chkfile(self.cell, chk, project, kpts)
def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
if verbose is None: verbose = self.verbose
return khf.analyze(self, verbose, with_meta_lowdin, **kwargs)
def mulliken_meta(self, cell=None, dm=None, verbose=logger.DEBUG,
pre_orth_method=PRE_ORTH_METHOD, s=None):
if cell is None: cell = self.cell
if dm is None: dm = self.make_rdm1()
if s is None: s = self.get_ovlp(cell)
return mulliken_meta(cell, dm, s=s, verbose=verbose,
pre_orth_method=pre_orth_method)
@lib.with_doc(dip_moment.__doc__)
def dip_moment(self, cell=None, dm=None, unit='Debye', verbose=logger.NOTE,
**kwargs):
if cell is None: cell = self.cell
if dm is None: dm = self.make_rdm1()
rho = kwargs.pop('rho', None)
if rho is None:
rho = self.get_rho(dm)
return dip_moment(cell, dm, unit, verbose, rho=rho, kpts=self.kpts, **kwargs)
spin_square = pbcrohf.ROHF.spin_square
canonicalize = canonicalize
def stability(self,
internal=getattr(__config__, 'pbc_scf_KSCF_stability_internal', True),
external=getattr(__config__, 'pbc_scf_KSCF_stability_external', False),
verbose=None):
raise NotImplementedError
def convert_from_(self, mf):
'''Convert the given mean-field object to KROHF'''
addons.convert_to_rhf(mf, self)
return self
del(WITH_META_LOWDIN, PRE_ORTH_METHOD)
if __name__ == '__main__':
from pyscf.pbc import gto
cell = gto.Cell()
cell.atom = '''
He 0 0 1
He 1 0 1
'''
cell.basis = '321g'
cell.a = np.eye(3) * 3
cell.mesh = [11] * 3
cell.verbose = 5
cell.spin = 2
cell.build()
mf = KROHF(cell, [2,1,1])
mf.kernel()
mf.analyze()
|
|
# -*- coding: utf-8 -*-
"""
This code isn't meant to be called directly; it is the core logic of KaplanMeierFitter.fit_interval_censoring.
References
https://upcommons.upc.edu/bitstream/handle/2117/93831/01Rop01de01.pdf
https://docs.ufpr.br/~giolo/CE063/Artigos/A4_Gomes%20et%20al%202009.pdf
"""
from collections import defaultdict, namedtuple
import warnings
import numpy as np
from numpy.linalg import norm
import pandas as pd
from lifelines.exceptions import ConvergenceWarning
from typing import *
interval = namedtuple("Interval", ["left", "right"])
class min_max:
"""
Keep only the min/max of streaming values
"""
def __init__(self):
self.min = np.inf
self.max = -np.inf
def add(self, value: float):
if value > self.max:
self.max = value
if value < self.min:
self.min = value
def __iter__(self):
yield self.min
yield self.max
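# Hedged usage sketch: track the running extrema of a stream of values.
def _example_min_max():
    mm = min_max()
    for v in (3.0, -1.0, 2.5):
        mm.add(v)
    return tuple(mm)  # (-1.0, 3.0)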
def temper(i: int, optimize) -> float:
if optimize:
return 0.9 * (2 * np.arctan(i / 100) / np.pi) + 1
else:
return 1.0
def E_step_M_step(observation_intervals, p_old, turnbull_interval_lookup, weights, i, optimize) -> np.ndarray:
"""
See [1], but also modifications.
References
-----------
1. Clifford Anderson-Bergman (2016): An efficient implementation of the
EMICM algorithm for the interval censored NPMLE, Journal of Computational and Graphical
Statistics, DOI: 10.1080/10618600.2016.1208616
"""
N = 0
m = np.zeros_like(p_old)
P = cumulative_sum(p_old)
for observation_interval, w in zip(observation_intervals, weights):
# find all turnbull intervals, t, that are contained in (ol, or). Call this set T
# the denominator is sum of p_old[T] probabilities
# the numerator is p_old[t]
min_, max_ = turnbull_interval_lookup[observation_interval]
m[min_ : max_ + 1] += w / (P[max_ + 1] - P[min_]).sum()
N += w
p_new = p_old * (m / N) ** temper(i, optimize)
p_new /= p_new.sum()
return p_new
def cumulative_sum(p: np.ndarray) -> np.ndarray:
# return np.insert(p, 0, 0).cumsum()
return np.concatenate((np.zeros(1), p)).cumsum()
def create_turnbull_intervals(left, right) -> List[interval]:
"""
Observation intervals are closed, []; the returned Turnbull intervals are
closed as well. A Turnbull interval is formed wherever a left endpoint is
immediately followed by a right endpoint in the sorted union of endpoints.
"""
left = [[l, "l"] for l in left]
right = [[r, "r"] for r in right]
union = sorted(left + right)
intervals = []
for e1, e2 in zip(union, union[1:]):
if e1[1] == "l" and e2[1] == "r":
intervals.append(interval(e1[0], e2[0]))
return intervals
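# Hedged usage sketch: two disjoint censoring intervals yield two Turnbull
# intervals, one per adjacent (left, right) endpoint pair in the sorted union.
def _example_turnbull_intervals():
    return create_turnbull_intervals([0, 2], [1, 3])
    # [Interval(left=0, right=1), Interval(left=2, right=3)]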
def is_subset(query_interval: interval, super_interval: interval) -> bool:
"""
assumes query_interval is [], and super_interval is (]
"""
return super_interval.left <= query_interval.left and query_interval.right <= super_interval.right
def create_turnbull_lookup(
turnbull_intervals: List[interval], observation_intervals: List[interval]
) -> Dict[interval, List[interval]]:
turnbull_lookup = defaultdict(min_max)
for i, turnbull_interval in enumerate(turnbull_intervals):
# ask: which observations is this t_interval part of?
for observation_interval in observation_intervals:
# observation_intervals are sorted by left endpoint, so we can stop once an observation's left endpoint exceeds this turnbull interval's right endpoint
if observation_interval.left > turnbull_interval.right:
break
if is_subset(turnbull_interval, observation_interval):
turnbull_lookup[observation_interval].add(i)
return {o: list(s) for o, s in turnbull_lookup.items()}
def check_convergence(
p_new: np.ndarray,
p_old: np.ndarray,
turnbull_lookup: Dict[interval, List[interval]],
weights: np.ndarray,
tol: float,
i: int,
verbose=False,
) -> bool:
old_ll = log_likelihood(p_old, turnbull_lookup, weights)
new_ll = log_likelihood(p_new, turnbull_lookup, weights)
delta = new_ll - old_ll
if verbose:
print("Iteration %d " % i)
print(" delta log-likelihood: %.10f" % delta)
print(" log-like: %.6f" % log_likelihood(p_new, turnbull_lookup, weights))
if (delta < tol) and (delta >= 0):
return True
return False
def create_observation_intervals(obs) -> List[interval]:
return [interval(l, r) for l, r in obs]
def log_odds(p: np.ndarray) -> np.ndarray:
return np.log(p) - np.log(1 - p)
def probs(log_odds: np.ndarray) -> np.ndarray:
o = np.exp(log_odds)
return o / (o + 1)
def npmle(left, right, tol=1e-7, weights=None, verbose=False, max_iter=1e5, optimize=False, fit_method="em"):
"""
left and right are closed intervals.
TODO: extend this to open-closed intervals.
"""
left, right = np.asarray(left), np.asarray(right)
if weights is None:
weights = np.ones_like(left)
# perform a group by to get unique observations and weights
df_ = pd.DataFrame({"l": left, "r": right, "w": weights}).groupby(["l", "r"]).sum()
weights = df_["w"].values
unique_obs = df_.index.values
# create objects needed
turnbull_intervals = create_turnbull_intervals(left, right)
observation_intervals = create_observation_intervals(unique_obs)
turnbull_lookup = create_turnbull_lookup(turnbull_intervals, observation_intervals)
if fit_method == "em":
p = expectation_maximization_fit(
observation_intervals, turnbull_intervals, turnbull_lookup, weights, tol, max_iter, optimize, verbose
)
elif fit_method == "scipy":
p = scipy_minimize_fit(turnbull_lookup, turnbull_intervals, weights, tol, verbose)
return p, turnbull_intervals
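# Hedged usage sketch: fit the NPMLE to three interval-censored observations;
# `p` is the probability mass assigned to each Turnbull interval.
def _example_npmle():
    p, turnbulls = npmle([0, 1, 4], [2, 3, 5])
    return p, turnbulls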
def scipy_minimize_fit(turnbull_interval_lookup, turnbull_intervals, weights, tol, verbose):
import autograd.numpy as anp
from autograd import value_and_grad
from scipy.optimize import minimize
def cumulative_sum(p):
return anp.concatenate((anp.zeros(1), p)).cumsum()
def negative_log_likelihood(p, turnbull_interval_lookup, weights):
P = cumulative_sum(p)
ix = anp.array(list(turnbull_interval_lookup.values()))
return -(weights * anp.log(P[ix[:, 1] + 1] - P[ix[:, 0]])).sum()
def con(p):
return p.sum() - 1
# initialize to equal weight
T = len(turnbull_intervals)
p = 1 / T * np.ones(T)
cons = {"type": "eq", "fun": con}
results = minimize(
value_and_grad(negative_log_likelihood),
args=(turnbull_interval_lookup, weights),
x0=p,
bounds=[(0, 1)] * T,
jac=True,
constraints=cons,
tol=tol,
options={"disp": verbose},
)
return results.x
def expectation_maximization_fit(
observation_intervals, turnbull_intervals, turnbull_lookup, weights, tol, max_iter, optimize, verbose
):
# convergence init
converged = False
i = 0
# initialize to equal weight
T = len(turnbull_intervals)
p = 1 / T * np.ones(T)
while (not converged) and (i < max_iter):
new_p = E_step_M_step(observation_intervals, p, turnbull_lookup, weights, i, optimize)
converged = check_convergence(new_p, p, turnbull_lookup, weights, tol, i, verbose=verbose)
# find alpha that maximizes ll using a line search
best_p, best_ll = p, -np.inf
delta = log_odds(new_p) - log_odds(p)
for alpha in np.array([1.0, 1.25, 1.95]):
p_temp = probs(log_odds(p) + alpha * delta)
ll_temp = log_likelihood(p_temp, turnbull_lookup, weights)
if best_ll < ll_temp:
best_ll = ll_temp
best_p = p_temp
p = best_p
i += 1
if i >= max_iter:
warnings.warn("Exceeded max iterations.", ConvergenceWarning)
return p
def log_likelihood(p: np.ndarray, turnbull_interval_lookup, weights) -> float:
P = cumulative_sum(p)
ix = np.array(list(turnbull_interval_lookup.values()))
return (weights * np.log(P[ix[:, 1] + 1] - P[ix[:, 0]])).sum()
def reconstruct_survival_function(
probabilities: np.ndarray, turnbull_intervals: List[interval], timeline=None, label="NPMLE"
) -> pd.DataFrame:
if timeline is None:
timeline = []
index = np.unique(np.concatenate((turnbull_intervals, [(0, 0)])))
label_upper = label + "_upper"
label_lower = label + "_lower"
df = pd.DataFrame([], index=index, columns=[label_upper, label_lower])
running_sum = 1.0
# the below values may be overwritten later, but we
# always default to starting at point (0, 1)
df.loc[0, label_upper] = running_sum
df.loc[0, label_lower] = running_sum
for p, (left, right) in zip(probabilities, turnbull_intervals):
df.loc[left, label_upper] = running_sum
df.loc[left, label_lower] = running_sum
if left != right:
df.loc[right, label_upper] = running_sum
df.loc[right, label_lower] = running_sum - p
running_sum -= p
full_dataframe = pd.DataFrame(index=timeline, columns=df.columns)
# First backfill at events between known observations
# Second fill all events _outside_ known obs with running_sum
return full_dataframe.combine_first(df).bfill().fillna(running_sum).clip(lower=0.0)
def npmle_compute_confidence_intervals(left, right, mle_, alpha=0.05, samples=1000):
"""
Uses the basic (reversed-percentile) bootstrap: each bound is computed as
2 * MLE - a bootstrap percentile of the resampled survival curves.
"""
left, right = np.asarray(left, dtype=float), np.asarray(right, dtype=float)
all_times = np.unique(np.concatenate((left, right, [0])))
N = left.shape[0]
bootstrapped_samples = np.empty((all_times.shape[0], samples))
for i in range(samples):
ix = np.random.randint(low=0, high=N, size=N)
left_ = left[ix]
right_ = right[ix]
bootstrapped_samples[:, i] = reconstruct_survival_function(*npmle(left_, right_), all_times).values[:, 0]
return (
2 * mle_.squeeze() - pd.Series(np.percentile(bootstrapped_samples, (alpha / 2) * 100, axis=1), index=all_times),
2 * mle_.squeeze() - pd.Series(np.percentile(bootstrapped_samples, (1 - alpha / 2) * 100, axis=1), index=all_times),
)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Categorical(distribution.Distribution):
"""Categorical distribution.
The categorical distribution is parameterized by the log-probabilities
of a set of classes.
#### Examples
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from.
```python
p = [0.1, 0.5, 0.4]
dist = Categorical(p=p)
```
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from, using logits.
```python
logits = [-50, 400, 40]
dist = Categorical(logits=logits)
```
Creates a 3-class distribution, with the 3rd class the most likely to be drawn.
The distribution functions can be evaluated on counts.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = Categorical(p=p)
dist.pmf(0) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
counts = [1, 0]
dist.pmf(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.pmf(counts) # Shape [5, 7, 3]
```
"""
def __init__(
self,
logits=None,
p=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Categorical"):
"""Initialize Categorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`p` should be passed in.
p: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
`logits` or `p` should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Unused in this distribution.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
"""
with ops.name_scope(name, values=[logits]) as ns:
self._logits, self._p = distribution_util.get_logits_and_prob(
name=name, logits=logits, p=p, validate_args=validate_args,
multidimensional=True)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
logits_shape = array_ops.shape(self._logits, name="logits_shape")
if logits_shape_static[-1].value is not None:
self._num_classes = ops.convert_to_tensor(
logits_shape_static[-1].value,
dtype=dtypes.int32,
name="num_classes")
else:
self._num_classes = array_ops.gather(logits_shape,
self._batch_rank,
name="num_classes")
if logits_shape_static[:-1].is_fully_defined():
self._batch_shape_val = constant_op.constant(
logits_shape_static[:-1].as_list(),
dtype=dtypes.int32,
name="batch_shape")
else:
with ops.name_scope(name="batch_shape"):
self._batch_shape_val = logits_shape[:-1]
super(Categorical, self).__init__(
dtype=dtype,
parameters={"logits": self._logits, "num_classes": self._num_classes},
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
@property
def num_classes(self):
"""Scalar `int32` tensor: the number of classes."""
return self._num_classes
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def p(self):
"""Vector of probabilities summing to one.
Each element is the probability of drawing that coordinate."""
return self._p
def _batch_shape(self):
# Use identity to inherit the caller's "name".
return array_ops.identity(self._batch_shape_val)
def _get_batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = math_ops.cast(samples, self.dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat(0, ([n], self.batch_shape())))
return ret
def _log_prob(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.logits.get_shape()[:-1] == k.get_shape():
logits = self.logits
else:
logits = self.logits * array_ops.ones_like(
array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
logits_shape = array_ops.shape(logits)[:-1]
k *= array_ops.ones(logits_shape, dtype=k.dtype)
k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)
def _prob(self, k):
return math_ops.exp(self._log_prob(k))
def _entropy(self):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
histogram_2d = nn_ops.softmax(logits_2d)
ret = array_ops.reshape(
nn_ops.softmax_cross_entropy_with_logits(logits_2d, histogram_2d),
self.batch_shape())
ret.set_shape(self.get_batch_shape())
return ret
def _mode(self):
ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
ret = math_ops.cast(ret, self.dtype)
ret.set_shape(self.get_batch_shape())
return ret
|
|
"""
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with the '==' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the `*args` argument passed into a function has at most
`len(compat_args)` elements and whether or not all of these elements are
set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
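# Hedged usage sketch ('clip' and its 'out' argument are hypothetical here):
# all positional arguments sit at their defaults, so this call passes.
def _example_validate_args():
    compat_args = {'out': None}
    validate_args('clip', (None,), max_fname_arg_count=3,
                  compat_args=compat_args)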
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
ValueError if `args` contains values not at the default value (`None`)
OR `kwargs` contains keys in `compat_args` that do not map to the
default values specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame or Panel
args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO(PY3): Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
" a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
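# Hedged usage sketch: exactly one of 'value'/'method' may be given; method
# strings are normalized by pandas' internal clean_fill_method.
def _example_validate_fillna():
    value, method = validate_fillna_kwargs(None, 'ffill')
    return value, method  # (None, 'pad') in pandas versions of this era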
|
|
from sympy.core import (S, symbols, Eq, pi, Catalan, EulerGamma, Lambda,
Dummy, Function)
from sympy.core.compatibility import StringIO
from sympy import erf, Integral, Piecewise
from sympy import Equality
from sympy.matrices import Matrix, MatrixSymbol
from sympy.printing.codeprinter import Assignment
from sympy.utilities.codegen import OctaveCodeGen, codegen, make_routine
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import XFAIL
import sympy
x, y, z = symbols('x,y,z')
def test_empty_m_code():
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([], output, "file", header=False, empty=False)
source = output.getvalue()
assert source == ""
def test_m_simple_code():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_with_header():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" %TEST Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_nameout():
expr = Equality(z, (x + y))
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function z = test(x, y)\n"
" z = x + y;\n"
"end\n"
)
assert source == expected
def test_m_numbersymbol():
name_expr = ("test", pi**Catalan)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test()\n"
" out1 = pi^0.915965594177219;\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_numbersymbol_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
name_expr = ("test", [pi**Catalan, EulerGamma])
result, = codegen(name_expr, "Octave", header=False,
empty=False, inline=False)
source = result[1]
expected = (
"function [out1, out2] = test()\n"
" Catalan = 0.915965594177219; % constant\n"
" EulerGamma = 0.5772156649015329; % constant\n"
" out1 = pi^Catalan;\n"
" out2 = EulerGamma;\n"
"end\n"
)
assert source == expected
def test_m_code_argument_order():
expr = x + y
routine = make_routine("test", expr, argument_sequence=[z, x, y], language="octave")
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([routine], output, "test", header=False, empty=False)
source = output.getvalue()
expected = (
"function out1 = test(z, x, y)\n"
" out1 = x + y;\n"
"end\n"
)
assert source == expected
def test_multiple_results_m():
# Here the output order is the input order
expr1 = (x + y)*z
expr2 = (x - y)*z
name_expr = ("test", [expr1, expr2])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
" out2 = z.*(x - y);\n"
"end\n"
)
assert source == expected
def test_results_named_unordered():
# Here output order is based on name_expr
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [C, A, B] = test(x, y, z)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_results_named_ordered():
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_complicated_m_codegen():
from sympy import sin, cos, tan
name_expr = ("testlong",
[ ((sin(x) + cos(y) + tan(z))**3).expand(),
cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))
])
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "testlong.m"
source = result[0][1]
expected = (
"function [out1, out2] = testlong(x, y, z)\n"
" out1 = sin(x).^3 + 3*sin(x).^2.*cos(y) + 3*sin(x).^2.*tan(z)"
" + 3*sin(x).*cos(y).^2 + 6*sin(x).*cos(y).*tan(z) + 3*sin(x).*tan(z).^2"
" + cos(y).^3 + 3*cos(y).^2.*tan(z) + 3*cos(y).*tan(z).^2 + tan(z).^3;\n"
" out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n"
"end\n"
)
assert source == expected
def test_m_output_arg_mixed_unordered():
# named outputs are alphabetical, unnamed output appear in the given order
from sympy import sin, cos, tan
a = symbols("a")
name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))])
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "foo.m"
    source = result[1]
expected = (
'function [out1, y, out3, a] = foo(x)\n'
' out1 = cos(2*x);\n'
' y = sin(x);\n'
' out3 = cos(x);\n'
' a = sin(2*x);\n'
'end\n'
)
assert source == expected
def test_m_piecewise_():
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" out1 = ((x < -1).*(0) + (~(x < -1)).*( ...\n"
" (x <= 1).*(x.^2) + (~(x <= 1)).*( ...\n"
" (x > 1).*(-x + 2) + (~(x > 1)).*(1))));\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_piecewise_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False,
inline=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" if (x < -1)\n"
" out1 = 0;\n"
" elseif (x <= 1)\n"
" out1 = x.^2;\n"
" elseif (x > 1)\n"
" out1 = -x + 2;\n"
" else\n"
" out1 = 1;\n"
" end\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file_w_header():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" %FOO Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_filename_match_first_fcn():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
raises(ValueError, lambda: codegen(name_expr,
"Octave", prefix="bar", header=False, empty=False))
def test_m_matrix_named():
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2))
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_named_matsym():
myout1 = MatrixSymbol('myout1', 1, 3)
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(myout1, e2, evaluate=False))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname():
expr = Matrix([[x, x+y, 3]])
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test(x, y)\n"
" out1 = [x x + y 3];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname_2():
e1 = (x + y)
e2 = Matrix([[2*x, 2*y, 2*z]])
e3 = Matrix([[x], [y], [z]])
e4 = Matrix([[x, y], [z, 16]])
name_expr = ("test", (e1, e2, e3, e4))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2, out3, out4] = test(x, y, z)\n"
" out1 = x + y;\n"
" out2 = [2*x 2*y 2*z];\n"
" out3 = [x; y; z];\n"
" out4 = [x y;\n"
" z 16];\n"
"end\n"
)
assert source == expected
def test_m_results_matrix_named_ordered():
B, C = symbols('B,C')
A = MatrixSymbol('A', 1, 3)
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, Matrix([[1, 2, x]]))
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
source = result[1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = [1 2 x];\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 1, 3)
D = MatrixSymbol('D', 2, 1)
name_expr = ("test", [Equality(B, A[0, :]),
Equality(C, A[1, :]),
Equality(D, A[:, 2])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C, D] = test(A)\n"
" B = A(1, :);\n"
" C = A(2, :);\n"
" D = A(:, 3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice2():
A = MatrixSymbol('A', 3, 4)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 2, 2)
name_expr = ("test", [Equality(B, A[0:2, 0:2]),
Equality(C, A[0:2, 1:3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(1:2, 1:2);\n"
" C = A(1:2, 2:3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice3():
A = MatrixSymbol('A', 8, 7)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 4, 2)
name_expr = ("test", [Equality(B, A[6:, 1::3]),
Equality(C, A[::2, ::3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(7:end, 2:3:end);\n"
" C = A(1:2:end, 1:3:end);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice_autoname():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, out2, out3, out4] = test(A)\n"
" B = A(1, :);\n"
" out2 = A(2, :);\n"
" out3 = A(:, 1);\n"
" out4 = A(:, 2);\n"
"end\n"
)
assert source == expected
def test_m_loops():
# Note: an Octave programmer would probably vectorize this across one or
# more dimensions. Also, size(A) would be used rather than passing in m
# and n. Perhaps users would expect us to vectorize automatically here?
# Or is it possible to represent such things using IndexedBase?
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Octave",
header=False, empty=False)
source = result[1]
expected = (
'function y = mat_vec_mult(A, m, n, x)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' y(i) = %(rhs)s + y(i);\n'
' end\n'
' end\n'
'end\n'
)
assert (source == expected % {'rhs': 'A(%s, %s).*x(j)' % (i, j)} or
source == expected % {'rhs': 'x(j).*A(%s, %s)' % (i, j)})
def test_m_tensor_loops_multiple_contractions():
# see comments in previous test about vectorizing
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
A = IndexedBase('A')
B = IndexedBase('B')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])),
"Octave", header=False, empty=False)
source = result[1]
expected = (
'function y = tensorthing(A, B, m, n, o, p)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' for k = 1:o\n'
' for l = 1:p\n'
' y(i) = y(i) + B(j, k, l).*A(i, j, k, l);\n'
' end\n'
' end\n'
' end\n'
' end\n'
'end\n'
)
assert source == expected
def test_m_InOutArgument():
expr = Equality(x, x**2)
name_expr = ("mysqr", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = mysqr(x)\n"
" x = x.^2;\n"
"end\n"
)
assert source == expected
def test_m_InOutArgument_order():
# can specify the order as (x, y)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False,
empty=False, argument_sequence=(x,y))
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
# make sure it gives (x, y) not (y, x)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
def test_m_not_supported():
f = Function('f')
name_expr = ("test", [f(x).diff(x), S.ComplexInfinity])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x)\n"
" % unsupported: Derivative(f(x), x)\n"
" % unsupported: zoo\n"
" out1 = Derivative(f(x), x);\n"
" out2 = zoo;\n"
"end\n"
)
assert source == expected
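# Illustrative sketch (not part of the original test suite): the codegen API
# exercised above can also be called directly; each element of the returned
# list is a (filename, source) pair, e.g.
def _example_direct_codegen():
    (fname, src), = codegen(("f", x**2 + y), "Octave", header=False, empty=False)
    assert fname == "f.m"
    return src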
|
|
#!/usr/bin/env python
'''
Created on 4 Feb 2010
@author: tcezard
'''
import os, sys, logging
from optparse import OptionParser
import utils
from utils import utils_param, utils_logging, compare_version_number,\
get_bwa_version, longest_common_substr_from_start
import command_runner
from utils.GenomeLoader import GenomeLoader
from utils.parameters import Config_file_error
PREPARE_GENOME='prepare_genome'
ALIGN_READS='align_reads'
RUN_TYPE=[PREPARE_GENOME,ALIGN_READS]
ANALYSIS_DIGITAL_TRANSC='digital_transc'
ANALYSIS_RNA_SEQ='rna_seq'
ANALYSIS_RAD_SEQ='rad'
ANALYSIS_TYPE=[ANALYSIS_DIGITAL_TRANSC,ANALYSIS_RNA_SEQ,ANALYSIS_RAD_SEQ]
def prepare_genome(genome_file,color_space=False):
run_fine=True
pipeline_param=utils_param.get_pipeline_parameters()
BWA_dir=pipeline_param.get_bwa_dir()
BWA_bin=os.path.join(BWA_dir,'bwa')
genome_loader = GenomeLoader(genome_file=genome_file)
length=0
for fasta_rec in genome_loader:
header, sequence = fasta_rec
length+=len(sequence)
if length>1000000000:
break
genome_loader.close()
    # Following BWA's recommendation, use the 'is' indexing algorithm for smaller
    # genomes and 'bwtsw' when the genome is larger than ~1 Gbp
if length>1000000000:
a_option='bwtsw'
else:
a_option='is'
#Create the indexes
if color_space:
command='%s index -c -a %s %s'%(BWA_bin, a_option, genome_file)
else:
command='%s index -a %s %s'%(BWA_bin, a_option, genome_file)
command_runner.run_command(command)
return run_fine
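# Illustrative sketch (not in the original source): depending on the total
# sequence length, prepare_genome() above issues one of
#   bwa index -a is <genome.fa>       # genomes up to ~1 Gbp
#   bwa index -a bwtsw <genome.fa>    # larger genomes
# with '-c' added when color_space=True; <genome.fa> is a placeholder path.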
def run_BWA_Command(genome_file, fastq_file1, fastq_file2=None, output_dir=None, sample_name=None,
clean_up=True, sort=False, thread=1, analysis_type=None, read_group=None, illumina=False):
run_fine=True
try:
pipeline_param=utils_param.get_pipeline_parameters()
BWA_dir=pipeline_param.get_bwa_dir()
samtools_dir=pipeline_param.get_samtools_dir()
picard_dir=pipeline_param.get_picard_dir()
    except Config_file_error as e:
logging.exception('Config_file_error:')
logging.warning("You'll need to have bwa and samtools in your path")
BWA_dir=''
samtools_dir=''
picard_dir=None
files_and_dir=[]
if fastq_file1.endswith('.gz'):
fastq_file1_unzip,ext = os.path.splitext(fastq_file1)
command = 'gunzip -c %s > %s'%(fastq_file1,fastq_file1_unzip)
return_code = command_runner.run_command(command)
        if return_code != 0:
run_fine = False
fastq_file1=fastq_file1_unzip
files_and_dir.append(fastq_file1)
if fastq_file2 and fastq_file2.endswith('.gz'):
fastq_file2_unzip,ext = os.path.splitext(fastq_file2)
command = 'gunzip -c %s > %s'%(fastq_file2,fastq_file2_unzip)
return_code = command_runner.run_command(command)
        if return_code != 0:
run_fine = False
fastq_file2=fastq_file2_unzip
files_and_dir.append(fastq_file2)
#Get the sample name
if not sample_name:
if fastq_file2:
fastq_common = longest_common_substr_from_start(fastq_file1,fastq_file2)
#remove trailing underscore _ and get the base name
sample_name = os.path.basename(fastq_common.rstrip('_'))
else:
tmp,ext=os.path.splitext(os.path.basename(fastq_file1))
tmp=tmp.rstrip('1')
sample_name = os.path.basename(tmp.rstrip('_'))
BWA_bin=os.path.join(BWA_dir,'bwa')
    # Check the bwa version: read groups are not supported before version 0.5.9
if compare_version_number(get_bwa_version(BWA_bin), "0.5.9") <0:
logging.warning("Your version of bwa does not support the read group. Get version 0.5.9 or later to use this function.")
read_group_command=''
else:
if read_group:
read_group_command='-r "%s"'%(read_group)
elif read_group is None:
read_group_element=[]
read_group_element.append("@RG")
read_group_element.append("ID:%s"%sample_name)
read_group_element.append("LB:%s"%sample_name)
read_group_element.append("CN:The Genepool")
read_group_element.append("PL:ILLUMINA")
read_group_element.append("SM:%s"%sample_name)
read_group_command= '-r "%s"'%('\\t'.join(read_group_element))
else:
read_group_command=''
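    # Illustrative sketch (not in the original source): with the default read
    # group built above, the '-r' argument expands to something like
    #   -r "@RG\tID:sample1\tLB:sample1\tCN:The Genepool\tPL:ILLUMINA\tSM:sample1"
    # where 'sample1' stands for the derived sample_name.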
samtools_bin=os.path.join(samtools_dir,'samtools')
if output_dir is None:
output_dir=os.path.dirname(fastq_file1)
fastq_name, ext=os.path.splitext(os.path.basename(fastq_file1))
sai_file1='%s.sai'%os.path.join(output_dir,fastq_name)
illumina_str=""
if illumina:
illumina_str=" -I "
command='%s aln %s -t %s %s %s > %s'%(BWA_bin, illumina_str,thread, genome_file, fastq_file1, sai_file1)
if analysis_type == ANALYSIS_DIGITAL_TRANSC:
#disable indels
command='%s aln %s -o 0 -t %s %s %s > %s'%(BWA_bin, illumina_str, thread, genome_file, fastq_file1, sai_file1)
return_code = command_runner.run_command(command)
    if return_code != 0:
run_fine = False
files_and_dir.append(sai_file1)
if fastq_file2:
fastq_name, ext=os.path.splitext(os.path.basename(fastq_file2))
sai_file2='%s.sai'%os.path.join(output_dir,fastq_name)
command='%s aln %s -t %s %s %s > %s'%(BWA_bin, illumina_str, thread, genome_file, fastq_file2, sai_file2)
return_code = command_runner.run_command(command)
files_and_dir.append(sai_file2)
        if return_code != 0:
run_fine = False
bam_file=os.path.join(output_dir, sample_name+'.bam')
if fastq_file2:
if analysis_type == ANALYSIS_RNA_SEQ:
command='%s sampe -a 20000 %s %s %s %s %s %s | %s view -bS - > %s'%(BWA_bin, read_group_command, genome_file, sai_file1, sai_file2,
fastq_file1, fastq_file2, samtools_bin, bam_file)
elif analysis_type == ANALYSIS_RAD_SEQ:
command='%s sampe -A %s %s %s %s %s %s | %s view -bS - > %s'%(BWA_bin, read_group_command, genome_file, sai_file1, sai_file2, fastq_file1,
fastq_file2, samtools_bin, bam_file)
else:
command='%s sampe %s %s %s %s %s %s | %s view -bS - > %s'%(BWA_bin, read_group_command, genome_file, sai_file1, sai_file2, fastq_file1,
fastq_file2, samtools_bin, bam_file)
else:
command='%s samse %s %s %s %s | %s view -bS - > %s'%(BWA_bin, read_group_command, genome_file, sai_file1, fastq_file1, samtools_bin,
bam_file)
return_code = command_runner.run_command( command)
    if return_code != 0:
run_fine = False
if sort:
files_and_dir.append(bam_file)
if picard_dir:
sorted_bam_file=os.path.join(output_dir,sample_name+'_sorted.bam')
return_code = utils.sort_bam_file_per_coordinate(picard_dir, bam_file, sorted_bam_file, overwrite=True)
else:
sorted_bam_file=os.path.join(output_dir,sample_name+'_sorted')
command='%s sort %s %s'%(samtools_bin, bam_file, sorted_bam_file)
return_code = command_runner.run_command( command)
        if return_code != 0:
run_fine = False
if run_fine and clean_up:
return_code = remove_file(files_and_dir)
        if return_code != 0:
run_fine = False
return run_fine
def remove_file(files_and_dir):
return command_runner.run_command( 'rm -fr %s'%(' '.join(files_and_dir)))
def check_genome_index(genome_file):
genome_index_valid=True
extensions=['', '.amb', '.ann','.bwt','.sa', '.pac']
for ext in extensions:
if not os.path.exists(genome_file+ext):
logging.error("%s doesn't exist"%(genome_file+ext))
genome_index_valid=False
return genome_index_valid
def main():
#initialize the logging
utils_logging.init_logging()
#Setup options
optparser=_prepare_optparser()
(options,args) = optparser.parse_args()
#verify options
arg_pass=_verifyOption(options)
if not arg_pass:
logging.warning(optparser.get_usage())
logging.critical("Non valid arguments: exit")
sys.exit(1)
utils_logging.change_log_stdout_to_log_stderr()
if options.print_commands:
utils_logging.change_log_stdout_to_log_stderr()
else:
command_runner.set_command_to_run_localy()
run_fine=run_BWA_Command(options.genome_file, options.fastq_file1, options.fastq_file2,
options.output_dir, options.name,analysis_type=options.analysis,
sort=options.sort, thread=options.thread, read_group=options.read_group,
illumina=options.illumina)
if run_fine:
logging.info('Run completed')
else:
logging.error('Run Failed')
sys.exit(1)
def _prepare_optparser():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = """usage: %prog <-g genome_fasta> <-1 first fastq file> [ -2 second fastq file -n sample_name]"""
    description = """This script will align reads in Sanger fastq format to a reference genome using bwa and samtools, and create a bam file."""
prog_version=utils.getWtss_version()
optparser = OptionParser(version="%prog of wtss_pipeline v"+prog_version,description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="show this help message and exit.")
optparser.add_option("-g","--genome_file",dest="genome_file",type="string",
help="Path to a fasta file where the genome is located. Default: %default")
optparser.add_option("-1","--fastq1",dest="fastq_file1",type="string",
help="Path to the first fastq file where the first reads are. This file is mandatory. Default: %default")
optparser.add_option("-2","--fastq2",dest="fastq_file2",type="string",
help="Path to the second fastq file where the second reads are. This file is optional. Default: %default")
optparser.add_option("-o","--output_dir",dest="output_dir",type="string",
help="The path to the directory where the results will be output. If not set, the results will be put in the same folder as fastq1. Default: %default")
optparser.add_option("-n","--name",dest="name",type="string",
help="The name of the sample currently being aligned. Default: %default")
optparser.add_option("-s", "--sort",dest="sort",action='store_true',default=False,
help="Sort the bam file by coordinates at the end of the alignment. Default: %default")
optparser.add_option("-t", "--thread",dest="thread",type='int',default=1,
help="Number of thread used by the alignment algorithm. Default: %default")
optparser.add_option("--illumina",dest="illumina",action='store_true',default=False,
                         help="The fastq files are in Illumina 1.3-1.6 fastq format. Default: %default")
optparser.add_option("--print",dest="print_commands",action='store_true',default=False,
                         help="Print the commands instead of running them. Default: %default")
help_rna_seq="%s --> increase the maximum insert size to 20kb.\n"%(ANALYSIS_RNA_SEQ)
help_digit_transc="%s --> prevent any gap in the tag.\n"%(ANALYSIS_DIGITAL_TRANSC)
optparser.add_option("--analysis",dest="analysis",type="string",default=None,
help="Set analysis specific parameters:\n"+help_rna_seq+help_digit_transc+"Default: %default")
optparser.add_option("-r", "--readgroup",dest="read_group",type="string",help="Set read group for SAM file:\n"+"Example: RG\tID:uid\tSM:sample\tPL:Illumina\n")
return optparser
def _verifyOption(options):
"""Check if the mandatory option are present in the options objects.
@return False if any argument is wrong."""
arg_pass=True
if not options.genome_file :
logging.error("You must specify a genome fasta file.")
arg_pass=False
elif not os.path.exists(options.genome_file):
logging.error("Genome fasta file not found. You must specify an existing genome fasta file.")
arg_pass=False
elif not check_genome_index(options.genome_file):
logging.error("Genome fasta file is not indexed properly. You must index the genome first.")
arg_pass=False
#if not options.read_group:
# logging.error("You must specify a read group with ID, SM and PL attributes.")
# arg_pass=False
if not options.fastq_file1:
logging.error("You must specify at least one fastq file.")
arg_pass=False
elif not os.path.exists(options.fastq_file1):
logging.error("fastq1 file %s not found. You must specify an existing file."%options.fastq_file1)
arg_pass=False
if options.fastq_file2:
if not os.path.exists(options.fastq_file2):
logging.error("fastq2 file %s not found. You must specify an existing file."%options.fastq_file2)
arg_pass=False
if options.output_dir:
if not os.path.exists(options.output_dir):
logging.error("output directory %s not found. You must specify an existing directory."%options.output_dir)
arg_pass=False
return arg_pass
if __name__=="__main__":
main()
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .enums import try_enum, ComponentType, ButtonStyle
from .utils import get_slots, MISSING
from .partial_emoji import PartialEmoji, _EmojiTag
if TYPE_CHECKING:
from .types.components import (
Component as ComponentPayload,
ButtonComponent as ButtonComponentPayload,
SelectMenu as SelectMenuPayload,
SelectOption as SelectOptionPayload,
ActionRow as ActionRowPayload,
)
from .emoji import Emoji
__all__ = (
'Component',
'ActionRow',
'Button',
'SelectMenu',
'SelectOption',
)
C = TypeVar('C', bound='Component')
class Component:
"""Represents a Discord Bot UI Kit Component.
Currently, the only components supported by Discord are:
- :class:`ActionRow`
- :class:`Button`
- :class:`SelectMenu`
This class is abstract and cannot be instantiated.
.. versionadded:: 2.0
Attributes
------------
type: :class:`ComponentType`
The type of component.
"""
__slots__: Tuple[str, ...] = ('type',)
__repr_info__: ClassVar[Tuple[str, ...]]
type: ComponentType
def __repr__(self) -> str:
attrs = ' '.join(f'{key}={getattr(self, key)!r}' for key in self.__repr_info__)
return f'<{self.__class__.__name__} {attrs}>'
@classmethod
def _raw_construct(cls: Type[C], **kwargs) -> C:
self: C = cls.__new__(cls)
for slot in get_slots(cls):
try:
value = kwargs[slot]
except KeyError:
pass
else:
setattr(self, slot, value)
return self
def to_dict(self) -> Dict[str, Any]:
raise NotImplementedError
class ActionRow(Component):
"""Represents a Discord Bot UI Kit Action Row.
This is a component that holds up to 5 children components in a row.
This inherits from :class:`Component`.
.. versionadded:: 2.0
Attributes
------------
type: :class:`ComponentType`
The type of component.
children: List[:class:`Component`]
The children components that this holds, if any.
"""
__slots__: Tuple[str, ...] = ('children',)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: ComponentPayload):
self.type: ComponentType = try_enum(ComponentType, data['type'])
self.children: List[Component] = [_component_factory(d) for d in data.get('components', [])]
def to_dict(self) -> ActionRowPayload:
return {
'type': int(self.type),
'components': [child.to_dict() for child in self.children],
} # type: ignore
class Button(Component):
"""Represents a button from the Discord Bot UI Kit.
This inherits from :class:`Component`.
.. note::
The user constructible and usable type to create a button is :class:`discord.ui.Button`
not this one.
.. versionadded:: 2.0
Attributes
-----------
style: :class:`.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[:class:`PartialEmoji`]
The emoji of the button, if available.
"""
__slots__: Tuple[str, ...] = (
'style',
'custom_id',
'url',
'disabled',
'label',
'emoji',
)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: ButtonComponentPayload):
self.type: ComponentType = try_enum(ComponentType, data['type'])
self.style: ButtonStyle = try_enum(ButtonStyle, data['style'])
self.custom_id: Optional[str] = data.get('custom_id')
self.url: Optional[str] = data.get('url')
self.disabled: bool = data.get('disabled', False)
self.label: Optional[str] = data.get('label')
self.emoji: Optional[PartialEmoji]
try:
self.emoji = PartialEmoji.from_dict(data['emoji'])
except KeyError:
self.emoji = None
def to_dict(self) -> ButtonComponentPayload:
payload = {
'type': 2,
'style': int(self.style),
'label': self.label,
'disabled': self.disabled,
}
if self.custom_id:
payload['custom_id'] = self.custom_id
if self.url:
payload['url'] = self.url
if self.emoji:
payload['emoji'] = self.emoji.to_dict()
return payload # type: ignore
class SelectMenu(Component):
"""Represents a select menu from the Discord Bot UI Kit.
A select menu is functionally the same as a dropdown, however
on mobile it renders a bit differently.
.. note::
The user constructible and usable type to create a select menu is
:class:`discord.ui.Select` not this one.
.. versionadded:: 2.0
Attributes
------------
custom_id: Optional[:class:`str`]
The ID of the select menu that gets received during an interaction.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
options: List[:class:`SelectOption`]
A list of options that can be selected in this menu.
disabled: :class:`bool`
Whether the select is disabled or not.
"""
__slots__: Tuple[str, ...] = (
'custom_id',
'placeholder',
'min_values',
'max_values',
'options',
'disabled',
)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: SelectMenuPayload):
self.type = ComponentType.select
self.custom_id: str = data['custom_id']
self.placeholder: Optional[str] = data.get('placeholder')
self.min_values: int = data.get('min_values', 1)
self.max_values: int = data.get('max_values', 1)
self.options: List[SelectOption] = [SelectOption.from_dict(option) for option in data.get('options', [])]
self.disabled: bool = data.get('disabled', False)
def to_dict(self) -> SelectMenuPayload:
payload: SelectMenuPayload = {
'type': self.type.value,
'custom_id': self.custom_id,
'min_values': self.min_values,
'max_values': self.max_values,
'options': [op.to_dict() for op in self.options],
'disabled': self.disabled,
}
if self.placeholder:
payload['placeholder'] = self.placeholder
return payload
class SelectOption:
"""Represents a select menu's option.
These can be created by users.
.. versionadded:: 2.0
Attributes
-----------
label: :class:`str`
The label of the option. This is displayed to users.
Can only be up to 25 characters.
value: :class:`str`
The value of the option. This is not displayed to users.
If not provided when constructed then it defaults to the
label. Can only be up to 100 characters.
description: Optional[:class:`str`]
An additional description of the option, if any.
Can only be up to 50 characters.
emoji: Optional[Union[:class:`str`, :class:`Emoji`, :class:`PartialEmoji`]]
The emoji of the option, if available.
default: :class:`bool`
Whether this option is selected by default.
"""
__slots__: Tuple[str, ...] = (
'label',
'value',
'description',
'emoji',
'default',
)
def __init__(
self,
*,
label: str,
value: str = MISSING,
description: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
default: bool = False,
) -> None:
self.label = label
self.value = label if value is MISSING else value
self.description = description
if emoji is not None:
if isinstance(emoji, str):
emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
emoji = emoji._to_partial()
else:
raise TypeError(f'expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}')
self.emoji = emoji
self.default = default
def __repr__(self) -> str:
return (
f'<SelectOption label={self.label!r} value={self.value!r} description={self.description!r} '
f'emoji={self.emoji!r} default={self.default!r}>'
)
def __str__(self) -> str:
if self.emoji:
base = f'{self.emoji} {self.label}'
else:
base = self.label
if self.description:
return f'{base}\n{self.description}'
return base
@classmethod
def from_dict(cls, data: SelectOptionPayload) -> SelectOption:
try:
emoji = PartialEmoji.from_dict(data['emoji'])
except KeyError:
emoji = None
return cls(
label=data['label'],
value=data['value'],
description=data.get('description'),
emoji=emoji,
default=data.get('default', False),
)
def to_dict(self) -> SelectOptionPayload:
payload: SelectOptionPayload = {
'label': self.label,
'value': self.value,
'default': self.default,
}
if self.emoji:
payload['emoji'] = self.emoji.to_dict() # type: ignore
if self.description:
payload['description'] = self.description
return payload
def _component_factory(data: ComponentPayload) -> Component:
component_type = data['type']
if component_type == 1:
return ActionRow(data)
elif component_type == 2:
return Button(data) # type: ignore
elif component_type == 3:
return SelectMenu(data) # type: ignore
else:
as_enum = try_enum(ComponentType, component_type)
return Component._raw_construct(type=as_enum)
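# Illustrative sketch (not part of the library): a minimal example of how a raw
# payload dict, shaped like those consumed by the constructors above, is turned
# into component objects by _component_factory. The payload values are made up
# for illustration and the function is never called at import time.
def _example_component_roundtrip():
    payload = {
        'type': 1,  # ActionRow
        'components': [
            {'type': 2, 'style': 1, 'label': 'Click me', 'custom_id': 'my_button'},
        ],
    }
    row = _component_factory(payload)  # ActionRow containing one Button child
    assert isinstance(row, ActionRow)
    return row.to_dict()               # round-trips back to a payload-shaped dict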
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9817")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9817")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
        addr = raw_input("Enter an Iridium address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
        addr = raw_input("Enter an Iridium address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
        to = raw_input("To (address): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# To run:
# python example-riaks.py deploy
# python example-riaks.py add_haproxies
# solar changes stage
# solar changes process
# solar orch run-once last
import click
import sys
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import errors
from solar.interfaces.db import get_db
from solar.events.controls import React, Dep
from solar.events.api import add_event
db = get_db()
def setup_riak():
db.clear()
signals.Connections.clear()
nodes = vr.create('nodes', 'templates/riak_nodes.yml', {})
node1, node2, node3 = nodes
riak_services = []
ips = '10.0.0.%d'
for i in xrange(3):
num = i + 1
r = vr.create('riak_service%d' % num,
'resources/riak_node',
{'riak_self_name': 'riak%d' % num,
'riak_hostname': 'riak_server%d.solar' % num,
'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0]
riak_services.append(r)
for i, riak in enumerate(riak_services):
signals.connect(nodes[i], riak)
for i, riak in enumerate(riak_services[1:]):
signals.connect(riak_services[0], riak, {'riak_name': 'join_to'}, events=None)
hosts_services = []
for i, riak in enumerate(riak_services):
num = i + 1
hosts_file = vr.create('hosts_file%d' % num,
'resources/hosts_file', {})[0]
hosts_services.append(hosts_file)
signals.connect(nodes[i], hosts_file)
for riak in riak_services:
for hosts_file in hosts_services:
signals.connect(riak, hosts_file,
{'riak_hostname': 'hosts_names', 'ip': 'hosts_ips'},
events=False)
has_errors = False
for r in locals().values():
# TODO: handle list
if not isinstance(r, resource.Resource):
continue
# print 'Validating {}'.format(r.name)
local_errors = validation.validate_resource(r)
if local_errors:
has_errors = True
            print 'ERROR: %s: %s' % (r.name, local_errors)
if has_errors:
print "ERRORS"
sys.exit(1)
events = [
Dep('hosts_file1', 'run', 'success', 'riak_service1', 'run'),
Dep('hosts_file2', 'run', 'success', 'riak_service2', 'run'),
Dep('hosts_file3', 'run', 'success', 'riak_service3', 'run'),
React('riak_service2', 'run', 'success', 'riak_service2', 'join'),
React('riak_service3', 'run', 'success', 'riak_service3', 'join'),
# Dep('riak_service1', 'run', 'success', 'riak_service2', 'join'),
# Dep('riak_service1', 'run', 'success', 'riak_service3', 'join'),
# React('riak_service2', 'join', 'error', 'riak_service2', 'leave'),
# React('riak_service3', 'join', 'error', 'riak_service3', 'leave'),
React('riak_service2', 'leave', 'success', 'riak_service2', 'join'),
React('riak_service3', 'leave', 'success', 'riak_service3', 'join'),
# React('riak_service2', 'leave', 'success', 'riak_service1', 'commit_leave'),
# React('riak_service3', 'leave', 'success', 'riak_service1', 'commit_leave'),
# Dep('riak_service1', 'commit_leave', 'success', 'riak_service2', 'join'),
# Dep('riak_service1', 'commit_leave', 'success', 'riak_service3', 'join'),
React('riak_service3', 'join', 'success', 'riak_service1', 'commit'),
React('riak_service2', 'join', 'success', 'riak_service1', 'commit')
]
for event in events:
add_event(event)
print 'Use solar changes process & orch'
sys.exit(0)
def setup_haproxies():
hps = []
hpc = []
hpsc_http = []
hpsc_pb = []
for i in xrange(3):
num = i + 1
hps.append(vr.create('haproxy_service%d' % num,
'resources/haproxy_service',
{})[0])
hpc.append(vr.create('haproxy_config%d' % num,
'resources/haproxy_config',
{})[0])
hpsc_http.append(vr.create('haproxy_service_config_http%d' % num,
'resources/haproxy_service_config',
{'listen_port': 8098,
'protocol': 'http',
'name': 'riak_haproxy_http%d' % num})[0])
hpsc_pb.append(vr.create('haproxy_service_config_pb%d' % num,
'resources/haproxy_service_config',
{'listen_port': 8087,
'protocol': 'tcp',
'name': 'riak_haproxy_pb%d' % num})[0])
riak1 = resource.load('riak_service1')
riak2 = resource.load('riak_service2')
riak3 = resource.load('riak_service3')
riaks = [riak1, riak2, riak3]
for single_hpsc in hpsc_http:
for riak in riaks:
signals.connect(riak, single_hpsc, {'riak_hostname': 'servers',
'riak_port_http': 'ports'})
for single_hpsc in hpsc_pb:
for riak in riaks:
signals.connect(riak, single_hpsc, {'riak_hostname': 'servers',
'riak_port_pb': 'ports'})
# haproxy config to haproxy service
for single_hpc, single_hpsc in zip(hpc, hpsc_http):
signals.connect(single_hpsc, single_hpc, {'protocol': 'configs_protocols',
'listen_port': 'listen_ports',
'name': 'configs_names',
'servers': 'configs',
'ports': 'configs_ports'})
for single_hpc, single_hpsc in zip(hpc, hpsc_pb):
signals.connect(single_hpsc, single_hpc, {'protocol': 'configs_protocols',
'listen_port': 'listen_ports',
'name': 'configs_names',
'servers': 'configs',
'ports': 'configs_ports'})
for single_hps, single_hpc in zip(hps, hpc):
signals.connect(single_hpc, single_hps, {'listen_ports': 'ports'},
events=False)
# assign haproxy services to each node
node1 = resource.load('node1')
node2 = resource.load('node2')
node3 = resource.load('node3')
nodes = [node1, node2, node3]
for single_node, single_hps in zip(nodes, hps):
signals.connect(single_node, single_hps)
for single_node, single_hpc in zip(nodes, hpc):
signals.connect(single_node, single_hpc)
has_errors = False
for r in locals().values():
# TODO: handle list
if not isinstance(r, resource.Resource):
continue
# print 'Validating {}'.format(r.name)
local_errors = validation.validate_resource(r)
if local_errors:
has_errors = True
            print 'ERROR: %s: %s' % (r.name, local_errors)
if has_errors:
print "ERRORS"
sys.exit(1)
events = []
for node, single_hps, single_hpc in zip(nodes, hps, hpc):
# r = React(node.name, 'run', 'success', single_hps.name, 'install')
d = Dep(single_hps.name, 'run', 'success', single_hpc.name, 'run')
e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'apply_config')
e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'apply_config')
# events.extend([r, d, e1, e2])
events.extend([d, e1, e2])
for event in events:
add_event(event)
@click.group()
def main():
pass
@click.command()
def deploy():
setup_riak()
@click.command()
def add_haproxies():
setup_haproxies()
@click.command()
def undeploy():
    raise NotImplementedError("Not yet")
main.add_command(deploy)
main.add_command(undeploy)
main.add_command(add_haproxies)
if __name__ == '__main__':
main()
|
|
from functools import reduce
import operator, readtime
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.utils import timezone
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.contrib.contenttypes.fields import GenericRelation
from ckeditor.fields import RichTextField
from cuser.middleware import CuserMiddleware
from hitcount.models import HitCount, HitCountMixin
from diventi.core.models import (
TimeStampedModel,
PromotableModel,
PublishableModel,
PublishableModelQuerySet,
Category,
DiventiImageModel,
DiventiCoverModel,
Element,
DiventiColModel,
)
class BlogCover(DiventiCoverModel, Element):
"""
Stores cover images for the blog page.
"""
class Meta:
verbose_name = _('Blog Cover')
verbose_name_plural = _('Blog Covers')
class ArticleCategory(Element):
"""
Defines the main argument of any article.
"""
class Meta:
verbose_name = _('Article Category')
verbose_name_plural = _('Article Categories')
class ArticleQuerySet(PublishableModelQuerySet):
    # Select the articles' related objects
def prefetch(self):
articles = self.select_related('category')
articles = articles.select_related('author')
articles = articles.prefetch_related('related_articles')
articles = articles.prefetch_related('promotions')
return articles
# Get the list of published articles from the most recent to the least
def history(self):
articles = self.published()
articles = articles.order_by('-publication_date')
return articles
    # Get the list of published articles, excluding the hot ones
def history_but_not_hot(self):
articles = self.history().exclude(hot=True)
return articles
# Get the list of published articles of a certain category
def category(self, category_title):
articles = self.history().filter(category__title=category_title)
return articles
# Get the featured articles
def hot(self):
articles = self.history().filter(hot=True)
return articles
# Get the hottest article
def hottest(self):
article = self.hot().latest('publication_date')
return article
# Fetch all the promotions related to the article
def promotions(self):
article = self.prefetch_related('promotions')
return article
# Get the most recent article
def current(self):
try:
article = self.hottest()
except Article.DoesNotExist:
article = self.published().latest('publication_date')
return article
# Get the published articles, counted by django hitcount
def hit_count(self):
articles = self.published().order_by('-hit_count_generic__hits')
return articles
# Get the most viewed articles, counted by django hitcount
def popular(self):
articles = self.hit_count()[:3]
return articles
class Article(TimeStampedModel, PromotableModel, PublishableModel, DiventiImageModel, DiventiColModel, Element, HitCountMixin):
"""
Blog posts are built upon a specific category and are always
introduced by a nice heading picture.
"""
category = models.ForeignKey(
ArticleCategory,
null=True,
verbose_name=_('category'),
on_delete=models.SET_NULL
)
content = RichTextField(
verbose_name=_('content')
)
hot = models.BooleanField(
default=False,
verbose_name=_('hot')
)
slug = models.SlugField(
unique=True,
verbose_name=_('slug')
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
related_name='articles',
verbose_name=_('author'),
on_delete=models.SET_NULL
)
related_articles = models.ManyToManyField(
'self',
related_name='related_articles',
blank=True,
verbose_name=_('related articles'),
) # Connect this article to others
hit_count_generic = GenericRelation(
HitCount,
object_id_field='object_pk',
related_query_name='hit_count_generic_relation'
) # Counts the views on this model
objects = ArticleQuerySet.as_manager()
class Meta:
verbose_name = _('article')
verbose_name_plural = _('articles')
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail', args=[str(self.slug)])
def search(self, query, *args, **kwargs):
results = Article.objects.history()
query_list = query.split()
results = results.filter(
reduce(operator.and_,
(Q(title__icontains=q) for q in query_list)) |
reduce(operator.and_,
(Q(description__icontains=q) for q in query_list))
)
return results
def reporting(self, *args, **kwargs):
queryset = Article.objects.popular()
results = []
for article in queryset:
results.append({
'columns': 4,
'name': '%(article)s' % {
'article': article.title,
},
'title': article.hit_count.hits,
'description1': _('views in the last week: %(d)s') % {
'd': article.hit_count.hits_in_last(days=7),
},
'description2': '',
'action': '',
})
return results
def get_readtime(self):
result = readtime.of_text(self.content)
return result.text
get_readtime.short_description = _('Readtime')
def get_words_number(self):
words = self.content.split()
result = len(words)
return result
get_words_number.short_description = _('Words number')
def get_hitcounts(self):
return self.hit_count.hits
get_hitcounts.short_description = _('Hit counts')
get_hitcounts.admin_order_field = 'hit_count_generic__hits'
def class_name(self):
return _('article')
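# Illustrative sketch (not in the original source): typical queries enabled by
# ArticleQuerySet above, e.g. in a view or a shell session:
#
#   Article.objects.history()          # published articles, newest first
#   Article.objects.category('News')   # published articles of one category ('News' is a placeholder title)
#   Article.objects.popular()          # the three most viewed articles (django-hitcount)
#   Article.objects.current()          # the hottest article, falling back to the latest published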
|
|
# coding: utf-8
import datetime, hashlib, decimal
from sqlalchemy import sql, Column, String, Integer, Boolean, DateTime, Float, ForeignKey
from sqlalchemy.orm import relation, backref, column_property, synonym
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from tornado.options import options
from utils.coredb import BaseQuery, Base
from utils.escape import json_encode, json_decode
class UserQuery(BaseQuery):
def get_users(self):
return self.filter_by(block=False)
def get_by_phone(self, phn):
        ''' Get a user from the users table and return the User object,
            or None if the user does not exist or multiple rows match.
'''
# FIXME
try:
u = self.get_users().filter_by(phone=phn).one()
self.session.commit()
except (NoResultFound, MultipleResultsFound):
u = None
except:
self.session.rollback()
raise
#return self.get_users().filter_by(phone=phn).first()
return u
def get_by_token(self, token):
        ''' Get a user from the users table and return the User object,
            or None if the user does not exist or multiple rows match.
'''
# FIXME
try:
u = self.get_users().filter_by(token=token).one()
self.session.commit()
except (NoResultFound, MultipleResultsFound):
u = None
except:
self.session.rollback()
raise
#return self.get_users().filter_by(token=token).first()
return u
class LoudQuery(BaseQuery):
def get_louds(self):
return self.filter(Loud.block==False).filter(Loud.user_id>0)
def get_by_cycle2(self, user_lat, user_lon):
return self.get_by_cycle(user_lat, user_lon).filter(Loud.block==False)
def cycle_update(self, user_lat, user_lon, updated):
return self.get_by_cycle(user_lat, user_lon).filter(Loud.updated>=updated)
def get_by_cycle(self, user_lat, user_lon):
# geo args
earth_r, distance = options.er, options.cr
# ignore user's small movement lat: 55.66m, lon: 54.93m
user_lat = decimal.Decimal(user_lat).quantize(decimal.Decimal('0.0001'))
user_lon = decimal.Decimal(user_lon).quantize(decimal.Decimal('0.0001'))
# mysql functions
acos, sin, cos, pi, abs = sql.func.acos, sql.func.sin, sql.func.cos, sql.func.pi, sql.func.abs
return self.filter(sql.or_(Loud.grade==0, abs(earth_r*acos(sin(user_lat)*sin(Loud.lat)*cos(user_lon-Loud.lon)+cos(user_lat)*cos(Loud.lat))*pi()/180)<distance))
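    # For reference (added, not in the original source): the radius check above is a
    # spherical-law-of-cosines style filter; the standard great-circle distance is
    #   d = R * acos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon2 - lon1))
    # with coordinates in radians and R the earth radius (options.er here).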
def get_by_cycle_key(self, user_lat, user_lon, key):
return self.get_by_cycle2(user_lat, user_lon).filter(Loud.content.like('%'+key+'%'))
class User(Base):
__tablename__ = 'users'
query_class = UserQuery
id = Column(Integer, primary_key=True)
phone = Column(Integer, unique=True)
_password = Column("password", String(32))
name = Column(String(20))
avatar = Column(String(100), nullable=True)
token = Column(String(32), nullable=True)
last_lat = Column(Float, default=0)
last_lon = Column(Float, default=0)
radius = Column(Float, nullable=True, default=2.5)
is_admin = Column(Boolean, default=False)
block = Column(Boolean, default=False)
updated = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created = Column(DateTime, default=datetime.datetime.now)
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
def __repr__(self):
return "<user:%s>" % self.phone
def __str__(self):
return "<user:%s>" % self.phone
def _get_password(self):
return self._password
def _set_password(self, password):
self._password = hashlib.md5(password).hexdigest()
password = synonym("_password", descriptor=property(_get_password, _set_password))
def can_save(self):
return self.phone and self.password and self.name
def owner_by(self, u):
return u and u.id == self.id
def admin_by(self, u):
return self.owner_by(u) or u.is_admin
def authenticate(self, password):
return self.password == hashlib.md5(password).hexdigest()
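    # Illustrative sketch (not in the original source): the password synonym above
    # hashes transparently on assignment, e.g.
    #   u = User(phone=1234567, name='bob')
    #   u.password = 'secret'          # stored as an md5 hexdigest via _set_password
    #   u.authenticate('secret')       # -> True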
def user_to_dict(self, u):
if self.owner_by(u):
# owner 's
info = self.user_to_dict_by_owner()
else:
info = self.user_to_dict_by_other()
return info
def user_to_dict_by_other(self):
# (id, link, phone, name, avatar, last_lon, last_lat, updated, is_admin)
info = self.to_dict(include=['phone', 'name', 'avatar', 'last_lat', 'last_lon', 'updated',
'is_admin'])
info['id'] = self.get_urn_id()
info['link'] = self.get_link()
info['avatar_link'] = self.get_avatar_link()
return info
def user_to_dict_by_owner(self):
# (id, link, phone, name, avatar, last_lat, last_lon, is_admin, updated, created, radius,
# loud_num)
info = self.to_dict(include=['phone', 'name', 'avatar', 'last_lat', 'last_lon','is_admin',
'radius', 'updated', 'created'])
info['id'] = self.get_urn_id()
info['link'] = self.get_link()
info['avatar_link'] = self.get_avatar_link()
info['loud_num'] = self.loud_num
return info
def user_to_dict_by_auth(self):
info = self.to_dict(include=['name', 'token', 'phone', 'updated'])
return info
def user2dict4redis(self):
info = self.to_dict(include=['name', 'phone', 'id', 'is_admin'])
return info
def get_link(self):
return "%s%s" % (options.site_uri, self.reverse_uri('user', self.phone))
def get_avatar_link(self):
return "%s/%s" % (options.static_uri, self.avatar)
def generate_avatar_path(self):
if self.phone:
self.avatar = 'i/%s.jpg' % hashlib.md5(str(self.phone)).hexdigest()
class Loud(Base):
__tablename__ = 'louds'
query_class = LoudQuery
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id', ondelete="CASCADE"))
content = Column(String(70))
lon = Column(Float, default=0)
lat = Column(Float, default=0)
flon = Column(Float, default=0, nullable=True)
flat = Column(Float, default=0, nullable=True)
address = Column(String(30), nullable=True)
grade = Column(Integer, default=5)
block = Column(Boolean, default=False)
updated = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
created = Column(DateTime, default=datetime.datetime.now)
# ON DELETE CASCADE took quite a bit of work to get right.
# To use this feature you must do two things:
# 1) set the ondelete keyword on the Column's ForeignKey (database level)
# 2) set the cascade keyword on the relation in the parent Model (SQLAlchemy session level)
user = relation('User', backref=backref('louds', order_by=created, cascade="all, delete, delete-orphan"))
def __init__(self, *args, **kwargs):
super(Loud, self).__init__(*args, **kwargs)
def __repr__(self):
return "<loud:%s>" % self.id
def __str__(self):
return "<loud:%s>" % self.id
def can_save(self):
return self.user_id and self.content and self.lat and self.lon
def owner_by(self, u):
return u and u.id == self.user_id
def admin_by(self, u):
return self.owner_by(u) or u.is_admin
def loud_to_dict(self):
loud_dict = self.to_dict(include=['content', 'grade', 'address', 'lat', 'lon', 'flat',
'flon', 'created'])
loud_dict['user'] = self.user.user_to_dict_by_other()
loud_dict['id'] = self.get_urn_id()
loud_dict['link'] = self.get_link()
return loud_dict
def get_link(self):
return "%s%s" % (options.site_uri, self.reverse_uri('loud', self.id))
# total number of louds belonging to the user
User.loud_num = column_property(sql.select([sql.func.count(Loud.id)]).\
where(Loud.user_id==User.id).as_scalar(), deferred=True)
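# --- Illustrative usage sketch (not part of the original module) ---
# loud_num is declared as a deferred column_property, so the COUNT subquery
# only runs the first time the attribute is accessed on a User instance.
# Hypothetical helper assuming a standard SQLAlchemy session:
def _example_loud_count(session, phone):
    user = session.query(User).filter_by(phone=phone).first()
    return user.loud_num if user is not None else 0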
|
|
# Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the main OpenFlow module.
Along with libopenflow, this is the major part of the OpenFlow API in POX.
There are a number of Events, which are generally raised on core.openflow
as well as on individual switch Connections. Many of these events have at
least some of the following properties:
.connection - a reference to the switch connection that caused the event
.dpid - the DPID of the switch that caused the event
.ofp - the OpenFlow message that caused the event (from libopenflow)
One of the more complicated aspects of OpenFlow is dealing with stats
replies, which may come in multiple parts (it shouldn't be that
difficult, really, but that hasn't stopped it from being handled wrong
more than once). In POX, the raw events are available, but you will
generally just want to listen to the aggregate stats events, which take
care of this for you and are only fired when all the data is available.
NOTE: This module is usually automatically loaded by pox.py
"""
from pox.lib.revent import *
from pox.lib.util import dpidToStr
import libopenflow_01 as of
from pox.lib.packet.ethernet import ethernet
class ConnectionUp (Event):
"""
Event raised when the connection to an OpenFlow switch has been
established.
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.dpid = connection.dpid
self.ofp = ofp
class FeaturesReceived (Event):
"""
Raised upon receipt of an ofp_switch_features message
This generally happens as part of a connection automatically.
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.dpid = connection.dpid
self.ofp = ofp
class ConnectionDown (Event):
"""
Event raised when the connection to an OpenFlow switch has been
lost.
"""
def __init__ (self, connection):
Event.__init__(self)
self.connection = connection
self.dpid = connection.dpid
class PortStatus (Event):
"""
Fired in response to port status changes.
added (bool) - True if fired because a port was added
deleted (bool) - True if fired because a port was deleted
modified (bool) - True if fired because a port was modified
port (int) - number of port in question
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.dpid = connection.dpid
self.ofp = ofp
self.modified = ofp.reason == of.OFPPR_MODIFY
self.added = ofp.reason == of.OFPPR_ADD
self.deleted = ofp.reason == of.OFPPR_DELETE
self.port = ofp.desc.port_no
class FlowRemoved (Event):
"""
Raised when a flow entry has been removed from a flow table.
This may either be because of a timeout or because it was removed
explicitly.
Properties:
idleTimeout (bool) - True if expired because of idleness
hardTimeout (bool) - True if expired because of hard timeout
timeout (bool) - True if either of the above is true
deleted (bool) - True if deleted explicitly
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.dpid = connection.dpid
self.ofp = ofp
self.idleTimeout = False
self.hardTimeout = False
self.deleted = False
self.timeout = False
if ofp.reason == of.OFPRR_IDLE_TIMEOUT:
self.timeout = True
self.idleTimeout = True
elif ofp.reason == of.OFPRR_HARD_TIMEOUT:
self.timeout = True
self.hardTimeout = True
elif ofp.reason == of.OFPRR_DELETE:
self.deleted = True
class RawStatsReply (Event):
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.ofp = ofp # Raw ofp message(s)
@property
def dpid (self):
return self.connection.dpid
class StatsReply (Event):
""" Abstract superclass for all stats replies """
def __init__ (self, connection, ofp, stats):
Event.__init__(self)
self.connection = connection
self.ofp = ofp # Raw ofp message(s)
self.stats = stats # Processed
@property
def dpid (self):
return self.connection.dpid
class SwitchDescReceived (StatsReply):
pass
class FlowStatsReceived (StatsReply):
pass
class AggregateFlowStatsReceived (StatsReply):
pass
class TableStatsReceived (StatsReply):
pass
class PortStatsReceived (StatsReply):
pass
class QueueStatsReceived (StatsReply):
pass
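# --- Illustrative sketch (not part of the original module) ---
# The module docstring recommends listening to the aggregate stats events
# (e.g. FlowStatsReceived) rather than RawStatsReply. A hypothetical component
# might request and consume flow stats roughly like this (assumes the standard
# ofp_stats_request / ofp_flow_stats_request messages from libopenflow_01):
def _example_request_flow_stats (connection):
  # The switch answers with one or more stats replies, which POX aggregates
  # into a single FlowStatsReceived event.
  connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))

def _example_handle_FlowStatsReceived (event):
  from pox.core import core
  # event.stats is the processed, fully aggregated list of flow entries.
  total_bytes = sum(f.byte_count for f in event.stats)
  core.getLogger().info("Flows on %s carry %s bytes",
                        dpidToStr(event.dpid), total_bytes)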
class PacketIn (Event):
"""
Fired in response to PacketIn events
port (int) - number of port the packet came in on
data (bytes) - raw packet data
parsed (packet subclasses) - pox.lib.packet's parsed version
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.ofp = ofp
self.port = ofp.in_port
self.data = ofp.data
self._parsed = None
self.dpid = connection.dpid
def parse (self):
if self._parsed is None:
self._parsed = ethernet(self.data)
return self._parsed
@property
def parsed (self):
"""
The packet as parsed by pox.lib.packet
"""
return self.parse()
class ErrorIn (Event):
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.ofp = ofp
self.xid = ofp.xid
self.dpid = connection.dpid
self.should_log = True # If this remains True, an error will be logged
def asString (self):
return self.ofp.show()
# def lookup (m, v):
# if v in m:
# return str(m[v])
# else:
# return "Unknown/" + str(v)
#
# #TODO: The show() in ofp_error actually does some clever
# # stuff now to stringize error messages. Refactor that and the
# # (less clever) code below.
# s = 'Type: ' + lookup(of.ofp_error_type_map, self.ofp.type)
# s += ' Code: '
#
# responses = {
# of.OFPET_HELLO_FAILED : of.ofp_hello_failed_code,
# of.OFPET_BAD_REQUEST : of.ofp_bad_request_code,
# of.OFPET_BAD_ACTION : of.ofp_bad_action_code,
# of.OFPET_FLOW_MOD_FAILED : of.ofp_flow_mod_failed_code,
# of.OFPET_PORT_MOD_FAILED : of.ofp_port_mod_failed_code,
# of.OFPET_QUEUE_OP_FAILED : of.ofp_queue_op_failed_code,
# }
#
# if self.ofp.type in responses:
# s += lookup(responses[self.ofp.type],self.ofp.code)
# else:
# s += "Unknown/" + str(self.ofp.code)
# if self.ofp.type == of.OFPET_HELLO_FAILED:
# s += lookup(of.ofp_hello_failed_code, self.ofp.type)
#
# return s
class BarrierIn (Event):
"""
Fired in response to a barrier reply
xid (int) - XID of barrier request
"""
def __init__ (self, connection, ofp):
Event.__init__(self)
self.connection = connection
self.ofp = ofp
self.dpid = connection.dpid
self.xid = ofp.xid
class ConnectionIn (Event):
def __init__ (self, connection):
super(ConnectionIn,self).__init__()
self.connection = connection
self.dpid = connection.dpid
self.nexus = None
class OpenFlowConnectionArbiter (EventMixin):
"""
Determines which OpenFlowNexus gets the switch.
Default implementation always just gives it to core.openflow
"""
_eventMixin_events = set([
ConnectionIn,
])
def __init__ (self, default = False):
""" default as False causes it to always use core.openflow """
self._default = default
self._fallback = None
def getNexus (self, connection):
e = ConnectionIn(connection)
self.raiseEventNoErrors(e)
if e.nexus is None:
e.nexus = self._default
if e.nexus is False:
if self._fallback is None:
try:
from pox.core import core
self._fallback = core.openflow
except:
raise RuntimeError("No OpenFlow nexus for new connection")
e.nexus = self._fallback
return e.nexus
class ConnectionDict (dict):
def __iter__ (self):
return self.itervalues()
def __contains__ (self, item):
v = dict.__contains__(self, item)
if v: return v
return item in self.values()
@property
def dpids (self):
return self.keys()
def iter_dpids (self):
return self.iterkeys()
class OpenFlowNexus (EventMixin):
"""
Main point of OpenFlow interaction.
There is usually just one instance of this class, registered as
core.openflow. Most OpenFlow events fire here in addition to on their
specific connections.
"""
_eventMixin_events = set([
ConnectionUp,
ConnectionDown,
FeaturesReceived,
PortStatus,
FlowRemoved,
PacketIn,
BarrierIn,
ErrorIn,
RawStatsReply,
SwitchDescReceived,
FlowStatsReceived,
AggregateFlowStatsReceived,
TableStatsReceived,
PortStatsReceived,
QueueStatsReceived,
])
# Bytes to send to controller when a packet misses all flows
miss_send_len = of.OFP_DEFAULT_MISS_SEND_LEN
# Enable/Disable clearing of flows on switch connect
clear_flows_on_connect = True
def __init__ (self):
self._connections = ConnectionDict() # DPID -> Connection
from pox.core import core
self.listenTo(core)
@property
def connections (self):
return self._connections
def getConnection (self, dpid):
"""
Get the Connection object associated with a DPID.
"""
return self._connections.get(dpid, None)
def sendToDPID (self, dpid, data):
"""
Send data to a specific DPID.
"""
if dpid in self._connections:
self._connections[dpid].send(data)
return True
else:
import logging
log = logging.getLogger("openflow")
log.warn("Couldn't send to %s because we're not connected to it!" %
(dpidToStr(dpid),))
return False
def _handle_DownEvent (self, event):
for c in self._connections.values():
try:
c.disconnect()
except:
pass
def _connect (self, con):
self._connections[con.dpid] = con
def _disconnect (self, dpid):
if dpid in self._connections:
del self._connections[dpid]
return True
return False
def launch (default_arbiter=True):
from pox.core import core
if core.hasComponent("openflow"):
return
if default_arbiter:
core.registerNew(OpenFlowConnectionArbiter)
core.register("openflow", OpenFlowNexus())
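# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical component showing the listener pattern described in
# the module docstring: most events fire on core.openflow as well as on the
# individual switch connections.
def _example_component ():
  from pox.core import core

  def _handle_ConnectionUp (event):
    core.getLogger().info("Switch %s connected", dpidToStr(event.dpid))

  def _handle_PacketIn (event):
    packet = event.parsed  # parsed by pox.lib.packet
    core.getLogger().debug("Packet on port %s: %s", event.port, packet)

  core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
  core.openflow.addListenerByName("PacketIn", _handle_PacketIn)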
|
|
"""Implement the rules of each Python build utility type."""
import compileall
import logging
import os
import subprocess
import zipfile
import mool.shared_utils as su
MAIN_FILE_NAME = '__main__.py'
INIT_FILE_NAME = '__init__.py'
PY_SHEBANG_HEADER = '#!/usr/bin/env python2.7\n'
SHEBANG_SYMBOL = '#!'
# Thanks to the Python community for making this part significantly simpler
# than the py.test binary generation part. Essentially, the python2.7
# interpreter can now execute a zip file that contains a __main__.py.
PY_MODULE_NAME_SEPARATOR = '.'
MAIN_FILE_CONTENTS_FOR_BIN = """
import sys
import {0}
sys.exit({0}.{1}())
"""
MAIN_FILE_CONTENTS_FOR_TEST = """
import os
import shutil
import sys
import subprocess
import zipfile
# Get the full path name.
binary_file_path = os.path.abspath(sys.argv[0])
assert os.path.exists(binary_file_path)
# Get the path to the extraction directory.
zipdir_path = binary_file_path + '.extract'
# Since we cannot guarantee the contents of the directory if it already
# exists, we make a fresh start to avoid unpredictable test behavior.
if os.path.exists(zipdir_path):
shutil.rmtree(zipdir_path)
# Extract python zipped file data to extraction directory.
os.makedirs(zipdir_path)
with zipfile.ZipFile(binary_file_path, 'r') as zipfile_obj:
zipfile_obj.extractall(zipdir_path)
# Switch path to extracted directory and execute py.test.
os.chdir(zipdir_path)
subprocess.check_call(['py.test', '-s', '.'])
"""
class Error(su.Error):
"""Error class for this module."""
def compile_all(params):
"""Compile python code recursively under current directory."""
assert not params
compileall.compile_dir('./', quiet=True)
def expand_lib(params):
"""Expand a link library in place."""
link_lib, is_zipped = params
tracer = logging.debug
if is_zipped:
tracer('Expanding zipped lib at %s', link_lib)
with zipfile.ZipFile(link_lib, 'r') as zip_obj:
zip_obj.extractall()
else:
tracer('Expanding directory lib at %s', link_lib)
# Recursively copy one directory into another. We cannot use shutil.copytree
# because we cannot assume the destination directory is new.
link_lib = link_lib if link_lib.endswith(os.sep) else link_lib + os.sep
for root, _, files in os.walk(link_lib):
for file_path in files:
src_file = os.path.join(root, file_path)
dst_file = src_file.replace(link_lib, '.' + os.sep)
tracer('Copying %s to %s', src_file, dst_file)
subprocess.check_call(su.get_mkdir_command(os.path.dirname(dst_file)))
subprocess.check_call(['cp', src_file, dst_file])
# Remove any main file in zip root coming from python executable binary.
if os.path.exists(MAIN_FILE_NAME):
os.remove(MAIN_FILE_NAME)
def run_pylint_checks(params):
"""Run python pylint checking."""
# This is done separately as we need to add proto/thrift packages to python
# path. This is the only way to run it without modifying user's PYTHONPATH.
assert len(params) == 2
command = ['pylint']
command.extend(params)
new_path = '{}:{}'.format(
os.path.join(su.THRIFT_INSTALL_DIR, 'pylib'),
os.environ['PYTHON_PROTOBUF_DIR'])
if 'PYTHONPATH' in os.environ:
new_path = '{}:{}'.format(new_path, os.environ['PYTHONPATH'])
environment = {'PATH': os.environ.get('PATH', ''), 'PYTHONPATH': new_path}
subprocess.check_call(command, env=environment)
def _main_file_contents_for_bin(main_class):
"""Get file contents of __main__.py for python binary."""
parts = main_class.split(PY_MODULE_NAME_SEPARATOR)
module = PY_MODULE_NAME_SEPARATOR.join(parts[:-1])
entry_point = parts[-1]
return MAIN_FILE_CONTENTS_FOR_BIN.format(module, entry_point)
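# Illustrative note (not part of the original build rules): for a hypothetical
# main_class of 'mypkg.cli.main', the generated __main__.py would read:
#
#   import sys
#   import mypkg.cli
#   sys.exit(mypkg.cli.main())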
def perform_linking(command_parts):
"""Steps to perform actual python linking."""
rule_type, main_class, tmp_out_file, out_file = command_parts
main_file_path = os.path.join('.', MAIN_FILE_NAME)
if su.PYTHON_BIN_TYPE == rule_type:
su.write_file(main_file_path, _main_file_contents_for_bin(main_class))
elif su.PYTHON_TEST_TYPE == rule_type:
su.write_file(main_file_path, MAIN_FILE_CONTENTS_FOR_TEST)
with zipfile.ZipFile(tmp_out_file, 'w') as zip_obj:
for root, _, files in os.walk('.'):
for file_path in files:
full_file_path = os.path.join(root, file_path)
# It is ok to skip raw .py files for py binaries. The directory has
# been compiled before the linking step. Therefore it is sufficient to
# ignore the .py files and pick up the .pyc files instead.
skip_file = (rule_type == su.PYTHON_BIN_TYPE and
full_file_path != main_file_path and
file_path.endswith('.py'))
if skip_file:
continue
zip_obj.write(full_file_path)
if su.PYTHON_LIB_TYPE == rule_type:
subprocess.check_call(['mv', tmp_out_file, out_file])
elif su.PYTHON_BIN_TYPE == rule_type:
su.write_file(out_file, PY_SHEBANG_HEADER + su.read_file(tmp_out_file))
subprocess.check_call(['chmod', '+x', out_file])
else:
assert su.PYTHON_TEST_TYPE == rule_type
su.write_file(out_file, PY_SHEBANG_HEADER + su.read_file(tmp_out_file))
subprocess.check_call(['chmod', '+x', out_file])
def create_initializers(params):
"""Create initializers recursively under current subdirectory."""
assert not params
for root, _unused_dirs, _unused_files in os.walk('.'):
if root == '.':
continue
init_file = os.path.join(root, INIT_FILE_NAME)
if su.path_exists(init_file):
continue
su.write_file(init_file, '')
def coding_guidelines_check(file_list):
"""Check other desired coding guidelines like shebang, file execution
permissions and coding standards not covered by pylint/pep8."""
for source in file_list:
line_number = 0
if os.access(source, os.X_OK):
raise Error('Disable execution permissions for {}.'.format(source))
for line in su.read_file(source).split('\n'):
line_number += 1
if line_number == 1 and line.startswith(SHEBANG_SYMBOL):
raise Error('Remove shebang header from {} file.'.format(source))
if line.endswith('\\'):
raise Error(('Line continuation not allowed using backslash in file'
' {}:{}').format(source, line_number))
def _get_lint_commands(rule_details):
"""Get lint and other static code check commands from sources."""
lint_commands = []
src_files = [su.get_relative_path(rule_details[su.POSSIBLE_PREFIXES_KEY], f)
for f in rule_details[su.SRCS_KEY]]
for file_path in src_files:
if INIT_FILE_NAME == os.path.basename(file_path):
continue
lint_commands.append(
[su.PYTHON_PYLINT_CHECK, '--rcfile=' + su.PYLINT_RC_FILE, file_path])
pep8_command = su.PEP8_COMMAND_LINE[:]
pep8_command.append(file_path)
lint_commands.append(pep8_command)
return lint_commands
def _get_all_deps(rule_details, details_map):
"""Get all link libraries for a Python build rule."""
link_libs = []
dep_sources = []
for rule_symbol in rule_details[su.ALL_DEPS_KEY]:
if rule_symbol == rule_details[su.SYMBOL_KEY]:
continue
link_libs.append(details_map[rule_symbol][su.OUT_KEY])
dep_sources.extend(details_map[rule_symbol][su.SRCS_KEY])
link_libs = sorted(list(set(link_libs)))
return link_libs, dep_sources
class PyCommon(object):
"""Common Python handler functions."""
@classmethod
def _set_all_dep_paths(cls, rule_details, link_libs, dep_sources):
"""Set all dependency paths list for the rule."""
all_dep_paths = rule_details[su.SRCS_KEY][:]
all_dep_paths.extend(link_libs)
all_dep_paths.extend(dep_sources)
all_dep_paths.append(rule_details[su.OUT_KEY])
rule_details[su.ALL_DEP_PATHS_KEY].extend(sorted(list(set(all_dep_paths))))
@classmethod
def _internal_setup(cls, rule_details, details_map, is_test):
"""Initializing build rule dictionary."""
su.init_rule_common(rule_details, rule_details[su.NAME_KEY], [su.SRCS_KEY])
su.set_workdir_child(rule_details, su.WDIR_SRC_KEY, 'code')
su.set_workdir_child(rule_details, su.WDIR_TARGET_KEY, 'target')
skip_lint = rule_details.get(su.PYTHON_SKIPLINT_KEY, 'False').lower()
rule_details[su.PYTHON_SKIPLINT_KEY] = (skip_lint == 'true')
if is_test:
rule_details[su.TEST_COMMANDS_KEY] = [[rule_details[su.OUT_KEY]]]
rule_details[su.POSSIBLE_PREFIXES_KEY] = su.prefix_transform([])
link_libs, dep_sources = _get_all_deps(rule_details, details_map)
cls._set_all_dep_paths(rule_details, link_libs, dep_sources)
rule_details[su.LINK_LIBS_KEY] = link_libs
cls._set_link_command(rule_details)
@classmethod
def _set_link_command(cls, rule_details):
"""Set Python link command."""
main_class = rule_details.get(su.MAIN_METHOD_KEY,
su.PYTHON_FAKE_MAIN_METHOD)
link_commands = [[su.CHANGE_CURR_DIR, rule_details[su.WDIR_SRC_KEY]]]
link_libs = rule_details[su.LINK_LIBS_KEY]
for link_lib in link_libs:
link_commands.append([su.PYTHON_EXPAND_LIB, link_lib, True])
if not rule_details[su.PYTHON_SKIPLINT_KEY]:
link_commands.extend(_get_lint_commands(rule_details))
for pc_dep in rule_details.get(su.PC_DEPS_KEY, []):
pc_dep = su.expand_env_vars(pc_dep)
link_commands.append([su.PYTHON_EXPAND_LIB, pc_dep, False])
if su.PYTHON_BIN_TYPE == rule_details[su.TYPE_KEY]:
link_commands.append([su.PYTHON_COMPILE_ALL_CURRDIR])
tmp_out_file = os.path.join(rule_details[su.WDIR_TARGET_KEY],
'.tmp.' + rule_details[su.NAME_KEY])
link_commands.append(
[su.PYTHON_LINK_ALL, rule_details[su.TYPE_KEY],
main_class, tmp_out_file, rule_details[su.OUT_KEY]])
rule_details[su.LINK_COMMANDS_KEY] = link_commands
@classmethod
def build_commands(cls, rule_details):
"""Generate build command line."""
coding_guidelines_check(rule_details.get(su.SRCS_KEY, []))
logging.info('Emitting %s at %s', rule_details[su.TYPE_KEY],
su.log_normalize(rule_details[su.OUT_KEY]))
directory_list = [rule_details[su.OUTDIR_KEY],
rule_details[su.WDIR_SRC_KEY],
rule_details[su.WDIR_TARGET_KEY]]
command_list = [su.get_mkdir_command(d) for d in directory_list]
command_list.append([su.CHANGE_CURR_DIR, rule_details[su.WDIR_SRC_KEY]])
# File linking is more efficient than copying. However pylint does not
# honor soft links. At the cost of performance, the only option here is to
# actually copy the files to the build directory.
command_list.extend(su.cp_commands_list(rule_details, su.SRCS_KEY,
use_links=False))
command_list.append([su.PYTHON_CREATE_INITIALIZERS])
command_list.extend(rule_details[su.LINK_COMMANDS_KEY])
return command_list
class PyLibrary(PyCommon):
"""Handler class for Python lib build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Full setup of the rule."""
cls._internal_setup(rule_details, details_map, False)
class PyBinary(PyCommon):
"""Handler class for Python binary build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Full setup of the rule."""
cls._internal_setup(rule_details, details_map, False)
class PyPyTest(PyCommon):
"""Handler class for Python test build rules."""
@classmethod
def setup(cls, rule_details, details_map):
"""Full setup of the rule."""
cls._internal_setup(rule_details, details_map, True)
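# --- Illustrative sketch (not part of the original build rules) ---
# Hedged illustration of why the shebang + zip trick in perform_linking()
# works: CPython can execute a zip archive whose root contains __main__.py,
# and because zip metadata lives at the end of the file, prepending the
# PY_SHEBANG_HEADER leaves the archive readable. The path below is a
# hypothetical placeholder.
def _example_build_runnable_zip(out_path):
    tmp_path = out_path + '.zip'
    with zipfile.ZipFile(tmp_path, 'w') as zip_obj:
        zip_obj.writestr(MAIN_FILE_NAME, 'print("hello from a zipped binary")\n')
    su.write_file(out_path, PY_SHEBANG_HEADER + su.read_file(tmp_path))
    subprocess.check_call(['chmod', '+x', out_path])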
|
|
import sublime, sublime_plugin
from threading import Timer
import threading
### set global variables
atshape = "non-rotated"
baseline = 0
forceshapelimit = 5
game = 0
shapesPlayed = 0
shapeheight = 0
shapewidth = 0
solidifiedRegions = []
text = []
shapeDetails = {}
gameOver = 0
illegalMove = 0
forceSolidify = 0
class playCommand(sublime_plugin.TextCommand):
### gamestates:
###
### 0 : new game
### 1 : game running
def run(self, edit, direction="down", gamestate=1, reset=0):
global gameOver
### reset game
if reset==1:
self.view.run_command("reset")
### only do stuff if game is not over
if gameOver == 0:
### get global variables
global baseline
global shapeheight
global shapewidth
global forceSolidify
x1 = int(self.view.substr(self.view.find("(?<=cx\=)[\d]*", 0)))
y1 = int(self.view.substr(self.view.find("(?<=cy\=)[\d]*", 0)))
baseline = y1
### start new game
if gamestate==0:
#game started
global game
game = 1
x1 = 1
y1 = 1
sublime.set_timeout(self.descend, 2000)
self.write("G", 2555)
self.write("A", 2560)
self.write("M", 2565)
self.write("E", 2571)
self.write("O", 2581)
self.write("N", 2586)
### do a test run to see if next move is illegal
if direction == "down" and y1 < (29-shapeheight):
y1 += 1
# elif direction == "up" and y1 > 1:
# self.rotate()
elif direction == "left" and x1 > 1:
x1 -= 1
elif direction == "right" and x1 < (25-shapewidth):
x1 += 1
isMoveIllegal = self.buildShape_BAR(x1,y1,direction,1)
print direction + " " + str(isMoveIllegal)
### revert x and y to prepare for actual move
if gamestate == 0:
x1 = 1
y1 = 1
else:
x1 = int(self.view.substr(self.view.find("(?<=cx\=)[\d]*", 0)))
y1 = int(self.view.substr(self.view.find("(?<=cy\=)[\d]*", 0)))
### do the actual move if the validity check found the move to be legal
if isMoveIllegal == 0:
if direction == "down" and y1 < (29-shapeheight):
y1 += 1
elif direction == "up" and y1 > 1:
self.rotate()
elif direction == "left" and x1 > 1:
x1 -= 1
elif direction == "right" and x1 < (25-shapewidth):
x1 += 1
self.view.replace(edit, self.view.find("cx\=[\d]*", 0), "cx="+str(x1))
self.view.replace(edit, self.view.find("cy\=[\d]*", 0), "cy="+str(y1))
x = self.buildShape_BAR(x1,y1,direction)
a = self.view.find_all(" ")
b = []
i = 0
for g in a:
if str(g) in x:
b.append(g)
i += 1
self.view.add_regions("player1", b, "source", sublime.DRAW_OUTLINED)
elif isMoveIllegal == 1 and direction == "down":
forceSolidify = 1
def buildShape_BAR(self, x, y, direction, checkMoveValidity=0):
global shapeheight
global shapewidth
global shapeDetails
global forceshapelimit
global forceSolidify
global illegalMove
i = 0
z = []
illegalMove = 0
if not len(shapeDetails):
### build shape if new
shapeDetails = {
"maxPossibleShapeHeight" : 6,
"maxPossibleShapeWidth" : 6,
"allPossibleRotations" : ["non-rotated","rotated-90"],
"currentRotation" : "non-rotated",
"shapeRegions" : []
}
### build new shape
if y > 0:
if str(shapeDetails["currentRotation"]) == "non-rotated":
shapeheight = 6
shapewidth = 1
while i <= 5:
z.append("("+str((y+1)*100+y+x)+", "+str((y+1)*100+y+x+1)+")")
i += 1
y += 1
elif str(shapeDetails["currentRotation"]) == "rotated-90":
shapeheight = 1
shapewidth = 6
while i <= 5:
z.append("("+str((y+1)*100+y+x)+", "+str((y+1)*100+y+x+1)+")")
i += 1
x += 1
for shapeDotRegion in z:
for solidifiedRegion in self.view.get_regions("solidifiedplayer1"):
#print str(solidifiedRegion) + " == "+ str(shapeDotRegion)
if str(solidifiedRegion) == str(shapeDotRegion):
illegalMove = 1
break
if checkMoveValidity == 0 and illegalMove == 1 and direction == "down":
forceSolidify = 1
### store regions the shape is currently occupying
shapeDetails["shapeRegions"] = z
### if it's a validity check, return whether the move is illegal
if checkMoveValidity == 1:
return illegalMove
### otherwise return new shape
else:
return z
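### Worked example of the cell-index arithmetic above (illustrative): for a
### shape dot at x=3, y=2 the expression (y+1)*100+y+x evaluates to
### 3*100 + 2 + 3 = 305, so the occupied cell is recorded as the region
### string "(305, 306)".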
def descend(self):
global baseline
global shapesPlayed
global shapeheight
global forceshapelimit
global forceSolidify
global gameOver
self.view.run_command("play",{"direction" : "down"})
#print "descending"
if baseline < (29-shapeheight) and forceSolidify == 0:
sublime.set_timeout(self.descend, 2000)
else:
forceSolidify = 0
self.solidify()
shapesPlayed +=1
if shapesPlayed < forceshapelimit:
self.view.run_command("play",{"gamestate":0})
else:
self.gameover()
gameOver = 1
def solidify(self):
global solidifiedRegions
solidifiedRegions += self.view.get_regions("player1")
self.view.add_regions("solidifiedplayer1", solidifiedRegions, "source", sublime.DRAW_EMPTY)
def rotate(self):
global shapeDetails
for rotatePosition in shapeDetails["allPossibleRotations"]:
if shapeDetails["currentRotation"] == rotatePosition:
if shapeDetails["allPossibleRotations"].index(shapeDetails["currentRotation"]) == (len(shapeDetails["allPossibleRotations"])-1):
shapeDetails["currentRotation"] = shapeDetails["allPossibleRotations"][0]
else:
shapeDetails["currentRotation"] = shapeDetails["allPossibleRotations"][(shapeDetails["allPossibleRotations"].index(shapeDetails["currentRotation"])+1)]
break
def gameover(self):
global text
global game
game = 0
text = []
self.write("G", 330)
self.write("A", 335)
self.write("M", 340)
self.write("E", 346)
self.write("O", 355)
self.write("V", 360)
self.write("E", 366)
self.write("R", 371)
def write(self, letter, p):
global text
sp = self.view.find_all(" ")
if letter == "G":
#G ["(10, 11)","(11, 12)","(12, 13)","(13, 14)","(111, 112)","(212, 213)","(313, 314)","(414, 415)","(415, 416)","(416, 417)","(417, 418)","(316, 317)","(215, 216)","(214, 215)"]
text += ["("+str(p)+", "+str(p+1)+")","("+str(p+1)+", "+str(p+2)+")","("+str(p+2)+", "+str(p+3)+")","("+str(p+3)+", "+str(p+4)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+404)+", "+str(p+405)+")","("+str(p+405)+", "+str(p+406)+")","("+str(p+406)+", "+str(p+407)+")","("+str(p+407)+", "+str(p+408)+")","("+str(p+306)+", "+str(p+307)+")","("+str(p+205)+", "+str(p+206)+")","("+str(p+204)+", "+str(p+205)+")"]
elif letter == "A":
#A ["(419, 420)","(318, 319)","(217, 218)","(116, 117)","(15, 16)","(16, 17)","(17, 18)","(18, 19)","(119, 120)","(220, 221)","(321, 322)","(422, 423)","(218, 219)","(219, 220)"]
text += ["("+str(p+404)+", "+str(p+405)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p)+", "+str(p+1)+")","("+str(p+1)+", "+str(p+2)+")","("+str(p+2)+", "+str(p+3)+")","("+str(p+3)+", "+str(p+4)+")","("+str(p+104)+", "+str(p+105)+")","("+str(p+205)+", "+str(p+206)+")","("+str(p+306)+", "+str(p+307)+")","("+str(p+407)+", "+str(p+408)+")","("+str(p+203)+", "+str(p+204)+")","("+str(p+204)+", "+str(p+205)+")"]
elif letter == "M":
#M ["(424, 425)","(323, 324)","(222, 223)","(121, 122)","(20, 21)","(122, 123)","(224, 225)","(124, 125)","(24, 25)","(125, 126)","(226, 227)","(327, 328)","(428, 429)"]
text += ["("+str(p+404)+", "+str(p+405)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p)+", "+str(p+1)+")","("+str(p+102)+", "+str(p+103)+")","("+str(p+204)+", "+str(p+205)+")","("+str(p+104)+", "+str(p+105)+")","("+str(p+4)+", "+str(p+5)+")","("+str(p+105)+", "+str(p+106)+")","("+str(p+206)+", "+str(p+207)+")","("+str(p+307)+", "+str(p+308)+")","("+str(p+408)+", "+str(p+409)+")"]
elif letter == "N":
#N
text += ["("+str(p+404)+", "+str(p+405)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p)+", "+str(p+1)+")","("+str(p+102)+", "+str(p+103)+")","("+str(p+204)+", "+str(p+205)+")","("+str(p+306)+", "+str(p+307)+")","("+str(p+4)+", "+str(p+5)+")","("+str(p+105)+", "+str(p+106)+")","("+str(p+206)+", "+str(p+207)+")","("+str(p+307)+", "+str(p+308)+")","("+str(p+408)+", "+str(p+409)+")"]
elif letter == "E":
#E ["(430, 431)","(431, 432)","(432, 433)","(433, 434)","(329, 330)","(228, 229)","(229, 230)","(230, 231)","(231, 232)","(127, 128)","(26, 27)","(27, 28)","(28, 29)","(29, 30)"]
text += ["("+str(p+404)+", "+str(p+405)+")","("+str(p+405)+", "+str(p+406)+")","("+str(p+406)+", "+str(p+407)+")","("+str(p+407)+", "+str(p+408)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+203)+", "+str(p+204)+")","("+str(p+204)+", "+str(p+205)+")","("+str(p+205)+", "+str(p+206)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p)+", "+str(p+1)+")","("+str(p+1)+", "+str(p+2)+")","("+str(p+2)+", "+str(p+3)+")","("+str(p+3)+", "+str(p+4)+")"]
elif letter == "O":
#O ["(36, 37)","(37, 38)","(38, 39)","(39, 40)","(140, 141)","(241, 242)","(342, 343)","(443, 444)","(442, 443)","(441, 442)","(440, 441)","(339, 340)","(238, 239)","(137, 138)"]
text += ["("+str(p)+", "+str(p+1)+")","("+str(p+1)+", "+str(p+2)+")","("+str(p+2)+", "+str(p+3)+")","("+str(p+3)+", "+str(p+4)+")","("+str(p+104)+", "+str(p+105)+")","("+str(p+205)+", "+str(p+206)+")","("+str(p+306)+", "+str(p+307)+")","("+str(p+407)+", "+str(p+408)+")","("+str(p+406)+", "+str(p+407)+")","("+str(p+405)+", "+str(p+406)+")","("+str(p+404)+", "+str(p+405)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+101)+", "+str(p+102)+")"]
elif letter == "V":
#V ["(41, 42)","(142, 143)","(243, 244)","(345, 346)","(447, 448)","(347, 348)","(247, 248)","(146, 147)","(45, 46)"]
text += ["("+str(p)+", "+str(p+1)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+304)+", "+str(p+305)+")","("+str(p+406)+", "+str(p+407)+")","("+str(p+306)+", "+str(p+307)+")","("+str(p+206)+", "+str(p+207)+")","("+str(p+105)+", "+str(p+106)+")","("+str(p+4)+", "+str(p+5)+")"]
elif letter == "R":
#R
text += ["("+str(p)+", "+str(p+1)+")","("+str(p+1)+", "+str(p+2)+")","("+str(p+2)+", "+str(p+3)+")","("+str(p+3)+", "+str(p+4)+")","("+str(p+101)+", "+str(p+102)+")","("+str(p+202)+", "+str(p+203)+")","("+str(p+203)+", "+str(p+204)+")","("+str(p+204)+", "+str(p+205)+")","("+str(p+205)+", "+str(p+206)+")","("+str(p+104)+", "+str(p+105)+")","("+str(p+303)+", "+str(p+304)+")","("+str(p+404)+", "+str(p+405)+")","("+str(p+305)+", "+str(p+306)+")","("+str(p+407)+", "+str(p+408)+")"]
r = []
for region in sp:
if str(region) in text:
r.append(region)
self.view.add_regions("gameoverregion", r, "source", sublime.DRAW_EMPTY)
class resetCommand(sublime_plugin.TextCommand):
def run(self, edit):
global solidifiedRegions
global shapesPlayed
global text
global gameOver
text = []
solidifiedRegions = []
shapesPlayed = 0
gameOver = 0
self.view.erase_regions("solidifiedplayer1")
self.view.erase_regions("gameoverregion")
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import logging
import numpy as np
from functools import partial
from ray.tune.registry import RLLIB_MODEL, RLLIB_PREPROCESSOR, \
RLLIB_ACTION_DIST, _global_registry
from ray.rllib.models.extra_spaces import Simplex
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDiagGaussian)
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork as FCNetV2
from ray.rllib.models.tf.visionnet_v2 import VisionNetwork as VisionNetV2
from ray.rllib.models.tf.tf_action_dist import (
Categorical, MultiCategorical, Deterministic, DiagGaussian,
MultiActionDistribution, Dirichlet)
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork
from ray.rllib.models.tf.lstm_v1 import LSTM
from ray.rllib.models.tf.modelv1_compat import make_v1_wrapper
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.visionnet_v1 import VisionNetwork
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
from ray.rllib.utils.error import UnsupportedSpaceException
tf = try_import_tf()
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
MODEL_DEFAULTS = {
# === Built-in options ===
# Filter config. List of [out_channels, kernel, stride] for each filter
"conv_filters": None,
# Nonlinearity for built-in convnet
"conv_activation": "relu",
# Nonlinearity for fully connected net (tanh, relu)
"fcnet_activation": "tanh",
# Number of hidden layers for fully connected net
"fcnet_hiddens": [256, 256],
# For control envs, documented in ray.rllib.models.Model
"free_log_std": False,
# Whether to skip the final linear layer used to resize the hidden layer
# outputs to size `num_outputs`. If True, then the last hidden layer
# should already match num_outputs.
"no_final_linear": False,
# Whether layers should be shared for the value function.
"vf_share_layers": True,
# == LSTM ==
# Whether to wrap the model with a LSTM
"use_lstm": False,
# Max seq len for training the LSTM, defaults to 20
"max_seq_len": 20,
# Size of the LSTM cell
"lstm_cell_size": 256,
# Whether to feed a_{t-1}, r_{t-1} to LSTM
"lstm_use_prev_action_reward": False,
# When using modelv1 models with a modelv2 algorithm, you may have to
# define the state shape here (e.g., [256, 256]).
"state_shape": None,
# == Atari ==
# Whether to enable framestack for Atari envs
"framestack": True,
# Final resized frame dimension
"dim": 84,
# (deprecated) Converts ATARI frame to 1 Channel Grayscale image
"grayscale": False,
# (deprecated) Changes frame to range from [-1, 1] if true
"zero_mean": True,
# === Options for custom models ===
# Name of a custom preprocessor to use
"custom_preprocessor": None,
# Name of a custom model to use
"custom_model": None,
# Name of a custom action distribution to use
"custom_action_dist": None,
# Extra options to pass to the custom classes
"custom_options": {},
}
# __sphinx_doc_end__
# yapf: enable
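# --- Illustrative sketch (not part of the original module) ---
# MODEL_DEFAULTS above is the complete set of recognized model config keys;
# unknown keys are rejected later in get_preprocessor_for_space(). A
# hypothetical helper for building an overridden config:
def _example_model_config(**overrides):
    """Return a copy of MODEL_DEFAULTS with the given keys overridden, e.g.
    _example_model_config(fcnet_hiddens=[128, 128], use_lstm=True)."""
    config = dict(MODEL_DEFAULTS)
    config.update(overrides)
    return config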
@PublicAPI
class ModelCatalog(object):
"""Registry of models, preprocessors, and action distributions for envs.
Examples:
>>> prep = ModelCatalog.get_preprocessor(env)
>>> observation = prep.transform(raw_observation)
>>> dist_class, dist_dim = ModelCatalog.get_action_dist(
env.action_space, {})
>>> model = ModelCatalog.get_model(inputs, dist_dim, options)
>>> dist = dist_class(model.outputs, model)
>>> action = dist.sample()
"""
@staticmethod
@DeveloperAPI
def get_action_dist(action_space, config, dist_type=None, torch=False):
"""Returns action distribution class and size for the given action space.
Args:
action_space (Space): Action space of the target gym env.
config (dict): Optional model config.
dist_type (str): Optional identifier of the action distribution.
torch (bool): Optional whether to return PyTorch distribution.
Returns:
dist_class (ActionDistribution): Python class of the distribution.
dist_dim (int): The size of the input vector to the distribution.
"""
config = config or MODEL_DEFAULTS
if config.get("custom_action_dist"):
action_dist_name = config["custom_action_dist"]
logger.debug(
"Using custom action distribution {}".format(action_dist_name))
dist = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)
elif isinstance(action_space, gym.spaces.Box):
if len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space has multiple dimensions "
"{}. ".format(action_space.shape) +
"Consider reshaping this into a single dimension, "
"using a custom action distribution, "
"using a Tuple action space, or the multi-agent API.")
if dist_type is None:
dist = TorchDiagGaussian if torch else DiagGaussian
elif dist_type == "deterministic":
dist = Deterministic
elif isinstance(action_space, gym.spaces.Discrete):
dist = TorchCategorical if torch else Categorical
elif isinstance(action_space, gym.spaces.Tuple):
if torch:
raise NotImplementedError("Tuple action spaces not supported "
"for Pytorch.")
child_dist = []
input_lens = []
for action in action_space.spaces:
dist, action_size = ModelCatalog.get_action_dist(
action, config)
child_dist.append(dist)
input_lens.append(action_size)
return partial(
MultiActionDistribution,
child_distributions=child_dist,
action_space=action_space,
input_lens=input_lens), sum(input_lens)
elif isinstance(action_space, Simplex):
if torch:
raise NotImplementedError("Simplex action spaces not "
"supported for Pytorch.")
dist = Dirichlet
elif isinstance(action_space, gym.spaces.MultiDiscrete):
if torch:
raise NotImplementedError("MultiDiscrete action spaces not "
"supported for Pytorch.")
return partial(MultiCategorical, input_lens=action_space.nvec), \
int(sum(action_space.nvec))
return dist, dist.required_model_output_shape(action_space, config)
raise NotImplementedError("Unsupported args: {} {}".format(
action_space, dist_type))
@staticmethod
@DeveloperAPI
def get_action_shape(action_space):
"""Returns action tensor dtype and shape for the action space.
Args:
action_space (Space): Action space of the target gym env.
Returns:
(dtype, shape): Dtype and shape of the actions tensor.
"""
if isinstance(action_space, gym.spaces.Discrete):
return (tf.int64, (None, ))
elif isinstance(action_space, (gym.spaces.Box, Simplex)):
return (tf.float32, (None, ) + action_space.shape)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
return (tf.as_dtype(action_space.dtype),
(None, ) + action_space.shape)
elif isinstance(action_space, gym.spaces.Tuple):
size = 0
all_discrete = True
for i in range(len(action_space.spaces)):
if isinstance(action_space.spaces[i], gym.spaces.Discrete):
size += 1
else:
all_discrete = False
size += np.product(action_space.spaces[i].shape)
return (tf.int64 if all_discrete else tf.float32, (None, size))
else:
raise NotImplementedError("action space {}"
" not supported".format(action_space))
@staticmethod
@DeveloperAPI
def get_action_placeholder(action_space):
"""Returns an action placeholder consistent with the action space
Args:
action_space (Space): Action space of the target gym env.
Returns:
action_placeholder (Tensor): A placeholder for the actions
"""
dtype, shape = ModelCatalog.get_action_shape(action_space)
return tf.placeholder(dtype, shape=shape, name="action")
@staticmethod
@DeveloperAPI
def get_model_v2(obs_space,
action_space,
num_outputs,
model_config,
framework,
name="default_model",
model_interface=None,
default_model=None,
**model_kwargs):
"""Returns a suitable model compatible with given spaces and output.
Args:
obs_space (Space): Observation space of the target gym env. This
may have an `original_space` attribute that specifies how to
unflatten the tensor into a ragged tensor.
action_space (Space): Action space of the target gym env.
num_outputs (int): The size of the output vector of the model.
framework (str): Either "tf" or "torch".
name (str): Name (scope) for the model.
model_interface (cls): Interface required for the model
default_model (cls): Override the default class for the model. This
only has an effect when not using a custom model
model_kwargs (dict): args to pass to the ModelV2 constructor
Returns:
model (ModelV2): Model to use for the policy.
"""
if model_config.get("custom_model"):
model_cls = _global_registry.get(RLLIB_MODEL,
model_config["custom_model"])
if issubclass(model_cls, ModelV2):
if model_interface and not issubclass(model_cls,
model_interface):
raise ValueError("The given model must subclass",
model_interface)
if framework == "tf":
created = set()
# Track and warn if vars were created but not registered
def track_var_creation(next_creator, **kw):
v = next_creator(**kw)
created.add(v)
return v
with tf.variable_creator_scope(track_var_creation):
instance = model_cls(obs_space, action_space,
num_outputs, model_config, name,
**model_kwargs)
registered = set(instance.variables())
not_registered = set()
for var in created:
if var not in registered:
not_registered.add(var)
if not_registered:
raise ValueError(
"It looks like variables {} were created as part "
"of {} but do not appear in model.variables() "
"({}). Did you forget to call "
"model.register_variables() on the variables in "
"question?".format(not_registered, instance,
registered))
else:
# no variable tracking
instance = model_cls(obs_space, action_space, num_outputs,
model_config, name, **model_kwargs)
return instance
elif tf.executing_eagerly():
raise ValueError(
"Eager execution requires a TFModelV2 model to be "
"used, however you specified a custom model {}".format(
model_cls))
if framework == "tf":
v2_class = None
# try to get a default v2 model
if not model_config.get("custom_model"):
v2_class = default_model or ModelCatalog._get_v2_model(
obs_space, model_config)
# fallback to a default v1 model
if v2_class is None:
if tf.executing_eagerly():
raise ValueError(
"Eager execution requires a TFModelV2 model to be "
"used, however there is no default V2 model for this "
"observation space: {}, use_lstm={}".format(
obs_space, model_config.get("use_lstm")))
v2_class = make_v1_wrapper(ModelCatalog.get_model)
# wrap in the requested interface
wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
return wrapper(obs_space, action_space, num_outputs, model_config,
name, **model_kwargs)
elif framework == "torch":
if default_model:
return default_model(obs_space, action_space, num_outputs,
model_config, name)
return ModelCatalog._get_default_torch_model_v2(
obs_space, action_space, num_outputs, model_config, name)
else:
raise NotImplementedError(
"Framework must be 'tf' or 'torch': {}".format(framework))
@staticmethod
@DeveloperAPI
def get_preprocessor(env, options=None):
"""Returns a suitable preprocessor for the given env.
This is a wrapper for get_preprocessor_for_space().
"""
return ModelCatalog.get_preprocessor_for_space(env.observation_space,
options)
@staticmethod
@DeveloperAPI
def get_preprocessor_for_space(observation_space, options=None):
"""Returns a suitable preprocessor for the given observation space.
Args:
observation_space (Space): The input observation space.
options (dict): Options to pass to the preprocessor.
Returns:
preprocessor (Preprocessor): Preprocessor for the observations.
"""
options = options or MODEL_DEFAULTS
for k in options.keys():
if k not in MODEL_DEFAULTS:
raise Exception("Unknown config key `{}`, all keys: {}".format(
k, list(MODEL_DEFAULTS)))
if options.get("custom_preprocessor"):
preprocessor = options["custom_preprocessor"]
logger.info("Using custom preprocessor {}".format(preprocessor))
prep = _global_registry.get(RLLIB_PREPROCESSOR, preprocessor)(
observation_space, options)
else:
cls = get_preprocessor(observation_space)
prep = cls(observation_space, options)
logger.debug("Created preprocessor {}: {} -> {}".format(
prep, observation_space, prep.shape))
return prep
@staticmethod
@PublicAPI
def register_custom_preprocessor(preprocessor_name, preprocessor_class):
"""Register a custom preprocessor class by name.
The preprocessor can later be used by specifying
{"custom_preprocessor": preprocessor_name} in the model config.
Args:
preprocessor_name (str): Name to register the preprocessor under.
preprocessor_class (type): Python class of the preprocessor.
"""
_global_registry.register(RLLIB_PREPROCESSOR, preprocessor_name,
preprocessor_class)
@staticmethod
@PublicAPI
def register_custom_model(model_name, model_class):
"""Register a custom model class by name.
The model can be later used by specifying {"custom_model": model_name}
in the model config.
Args:
model_name (str): Name to register the model under.
model_class (type): Python class of the model.
"""
_global_registry.register(RLLIB_MODEL, model_name, model_class)
@staticmethod
@PublicAPI
def register_custom_action_dist(action_dist_name, action_dist_class):
"""Register a custom action distribution class by name.
The action distribution can later be used by specifying
{"custom_action_dist": action_dist_name} in the model config.
Args:
action_dist_name (str): Name to register the action distribution under.
action_dist_class (type): Python class of the action distribution.
"""
_global_registry.register(RLLIB_ACTION_DIST, action_dist_name,
action_dist_class)
@staticmethod
def _wrap_if_needed(model_cls, model_interface):
assert issubclass(model_cls, TFModelV2), model_cls
if not model_interface or issubclass(model_cls, model_interface):
return model_cls
class wrapper(model_interface, model_cls):
pass
name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__)
wrapper.__name__ = name
wrapper.__qualname__ = name
return wrapper
@staticmethod
def _get_default_torch_model_v2(obs_space, action_space, num_outputs,
model_config, name):
from ray.rllib.models.torch.fcnet import (FullyConnectedNetwork as
PyTorchFCNet)
from ray.rllib.models.torch.visionnet import (VisionNetwork as
PyTorchVisionNet)
model_config = model_config or MODEL_DEFAULTS
if model_config.get("use_lstm"):
raise NotImplementedError(
"LSTM auto-wrapping not implemented for torch")
if isinstance(obs_space, gym.spaces.Discrete):
obs_rank = 1
else:
obs_rank = len(obs_space.shape)
if obs_rank > 2:
return PyTorchVisionNet(obs_space, action_space, num_outputs,
model_config, name)
return PyTorchFCNet(obs_space, action_space, num_outputs, model_config,
name)
@staticmethod
def get_model(input_dict,
obs_space,
action_space,
num_outputs,
options,
state_in=None,
seq_lens=None):
"""Deprecated: use get_model_v2() instead."""
assert isinstance(input_dict, dict)
options = options or MODEL_DEFAULTS
model = ModelCatalog._get_model(input_dict, obs_space, action_space,
num_outputs, options, state_in,
seq_lens)
if options.get("use_lstm"):
copy = dict(input_dict)
copy["obs"] = model.last_layer
feature_space = gym.spaces.Box(
-1, 1, shape=(model.last_layer.shape[1], ))
model = LSTM(copy, feature_space, action_space, num_outputs,
options, state_in, seq_lens)
logger.debug(
"Created model {}: ({} of {}, {}, {}, {}) -> {}, {}".format(
model, input_dict, obs_space, action_space, state_in, seq_lens,
model.outputs, model.state_out))
model._validate_output_shape()
return model
@staticmethod
def _get_model(input_dict, obs_space, action_space, num_outputs, options,
state_in, seq_lens):
if options.get("custom_model"):
model = options["custom_model"]
logger.debug("Using custom model {}".format(model))
return _global_registry.get(RLLIB_MODEL, model)(
input_dict,
obs_space,
action_space,
num_outputs,
options,
state_in=state_in,
seq_lens=seq_lens)
obs_rank = len(input_dict["obs"].shape) - 1
if obs_rank > 2:
return VisionNetwork(input_dict, obs_space, action_space,
num_outputs, options)
return FullyConnectedNetwork(input_dict, obs_space, action_space,
num_outputs, options)
@staticmethod
def _get_v2_model(obs_space, options):
options = options or MODEL_DEFAULTS
obs_rank = len(obs_space.shape) - 1
if options.get("use_lstm"):
return None # TODO: default LSTM v2 not implemented
if obs_rank > 2:
return VisionNetV2
return FCNetV2
@staticmethod
def get_torch_model(obs_space,
num_outputs,
options=None,
default_model_cls=None):
raise DeprecationWarning("Please use get_model_v2() instead.")
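# --- Illustrative sketch (not part of the original module) ---
# Hedged usage example of the registration API documented above: register a
# custom model class under a hypothetical name and return the model config
# fragment that selects it.
def _example_register_custom_model(model_cls):
    ModelCatalog.register_custom_model("my_custom_model", model_cls)
    return {"custom_model": "my_custom_model", "custom_options": {}}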
|
|
'''
MIT License
Copyright (c) 2017 Matej Usaj
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Created on Mar 21, 2017
@author: Matej Usaj
'''
from itertools import chain
import logging
import os
import h5py
import numpy as np
import pandas as p
from .toolbox import correlation
from .toolbox.utils import hdf5_read_str_list, read_strain_map
from .toolbox.table_norm import table_normalize
logger = logging.getLogger(__name__)
class Similarity(object):
'''
Computes profile similarity (correlation) matrices from SGA score data.
'''
INPUT_FORMAT_BJV = 'bjv'
INPUT_FORMAT_TXT = 'txt'
def __init__(self, input_path1, output_path, input_path2=None, input_format=INPUT_FORMAT_TXT,
strain_map=None):
'''
Constructor
'''
load_func = '_load_%s' % (input_format,)
if not hasattr(self, load_func):
raise Exception('Unknown input format "%s"' % (input_format,))
self.output = output_path
load_func = getattr(self, load_func)
if not os.path.exists(input_path1) or not os.path.isfile(input_path1):
raise Exception('Input file does not exist "%s"' % (input_path1,))
if input_path2 and (not os.path.exists(input_path2) or not os.path.isfile(input_path2)):
raise Exception('Input file does not exist "%s"' % (input_path2,))
if input_format == self.INPUT_FORMAT_BJV and not strain_map:
raise Exception("Ben's format requires strain-allele mapping file")
if isinstance(strain_map, str):
logger.info("Reading strain map")
strain_map = read_strain_map(strain_map)
logger.debug("Found %d strain mappings", len(strain_map))
self.strain_map = strain_map
logger.info("Loading data")
load_func(input_path1, input_path2)
logger.info("Data loaded. TS matrix size is %s, FG matrix size is %s", self.ts_data.shape, self.fg_data.shape)
def _bjv_read_scores(self, dataset, root_ele):
logger.debug("BJV-%s:Reading ORF list", root_ele)
# orfs = np.array(list(map(lambda x: x.split('_')[1], hdf5_read_str_list(dataset, dataset.get('%s/Cannon/Orf' % (root_ele,))[0]))))
orfs = np.array(hdf5_read_str_list(dataset, dataset.get('%s/Cannon/Orf' % (root_ele,))[0]))
logger.debug("BJV-%s:Generating query/array indices", root_ele)
query_idx = np.extract(np.array(dataset.get('%s/Cannon/isQuery' % (root_ele,))[0], dtype=bool), np.arange(*orfs.shape))
array_idx = np.extract(np.array(dataset.get('%s/Cannon/isArray' % (root_ele,)), dtype=bool).T, np.arange(*orfs.shape))
logger.debug("BJV-%s:Creating DataFrame", root_ele)
return p.DataFrame(
dataset.get('%s/eps' % (root_ele,))[array_idx, :][:, query_idx],
index=orfs[array_idx],
columns=orfs[query_idx])
def _load_bjv(self, inp, _):
logger.debug("Reading BJV formatted file %s", inp)
dataset = h5py.File(inp, 'r')
logger.debug("BJV:Reading TS data")
self.ts_data = self._bjv_read_scores(dataset, 'ts_merge')
logger.debug("BJV:Reading FG data")
self.fg_data = self._bjv_read_scores(dataset, 'fg_merge')
def _load_txt(self, ints, infg):
raise NotImplementedError('Loading text file is not implemented yet')
def _similarity(self, data):
logger.info("Input matrix size is %s.", data.shape)
logger.debug("Computing row similarity on %d rows.", data.shape[0])
corr_rows = correlation.correlation(data, axis='rows')
logger.debug("Computing column similarity on %d columns.", data.shape[1])
corr_cols = correlation.correlation(data, axis='columns')
if self.strain_map:
logger.debug("Replacing strain ids with allele names.")
corr_rows.columns = [self.strain_map[c.split('_')[1]] for c in corr_rows.columns]
corr_rows.index = [self.strain_map[c.split('_')[1]] for c in corr_rows.index]
corr_cols.columns = [self.strain_map[c.split('_')[1]] for c in corr_cols.columns]
corr_cols.index = [self.strain_map[c.split('_')[1]] for c in corr_cols.index]
unified_axis = sorted(set(list(corr_cols.index) + list(corr_rows.index)))
logger.debug("Unified axis (%d) ready.", len(unified_axis))
merged = np.array([
corr_cols.reindex(unified_axis,unified_axis).values,
corr_rows.reindex(unified_axis,unified_axis).values])
logger.debug("Combining QQ/AA correlations.")
return p.DataFrame(np.nanmean(merged, axis=0), index=unified_axis, columns=unified_axis)
def essential_similarity(self):
logger.info("Computing similarity of essential strain profiles.")
data = self.ts_data.loc[
[c for c in self.ts_data.index if '_tsa' in c],
[c for c in self.ts_data.columns if '_tsq' in c]]
self.ts_sim = self._similarity(data)
return self.ts_sim
def nonessential_similarity(self):
logger.info("Computing similarity of nonessential strain profiles.")
data = self.fg_data.loc[
[c for c in self.fg_data.index if '_dma' in c],
[c for c in self.fg_data.columns if '_sn' in c]]
self.fg_sim = self._similarity(data)
return self.fg_sim
def similarity(self):
tsdata = self.ts_data.loc[
:,
[c for c in self.ts_data.columns if ('_y' not in c and '_damp' not in c)]]
fgdata = self.fg_data.loc[
:,
[c for c in self.fg_data.columns if ('_y' not in c and '_damp' not in c)]]
# if self.strain_map:
# logger.debug("Replacing strain ids with allele names.")
# tsdata.columns = [self.strain_map[c] for c in tsdata.columns]
# tsdata.index = [self.strain_map[c] for c in tsdata.index]
# fgdata.columns = [self.strain_map[c] for c in fgdata.columns]
# fgdata.index = [self.strain_map[c] for c in fgdata.index]
#
# tsdata = tsdata.iloc[~tsdata.index.duplicated(), ~tsdata.columns.duplicated()]
# fgdata = fgdata.iloc[~fgdata.index.duplicated(), ~fgdata.columns.duplicated()]
#
# unified_axis = sorted(set(chain(tsdata.index, tsdata.columns, fgdata.index, fgdata.columns)))
        # Layer 1: FG QQ
        logger.debug("Computing FG QQ correlations.")
        fg_qq = correlation.correlation(fgdata, axis='columns')
        # Layer 2: TS QQ (normalized based on TS AA)
        logger.debug("Computing TS QQ correlations.")
        ts_qq = correlation.correlation(tsdata, axis='columns')
        common_queries = sorted(set(fgdata.columns).intersection(tsdata.columns))
        common_arrays = sorted(set(fgdata.index).intersection(tsdata.index))
        # cc1 / data1
        logger.debug("Computing FG AA correlations on the common strains.")
        tmp_fg_aa = correlation.correlation(
            fgdata.reindex(index=common_arrays, columns=common_queries), axis='rows')
        # cc2 / data2
        logger.debug("Computing TS AA correlations on the common strains.")
        tmp_ts_aa = correlation.correlation(
            tsdata.reindex(index=common_arrays, columns=common_queries), axis='rows')
        logger.debug("Normalizing TS QQ correlations.")
        ts_qq_norm = table_normalize(tmp_fg_aa, tmp_ts_aa, ts_qq)
        # Layer 3: FG AA
        logger.debug("Computing FG AA correlations.")
        fg_aa = correlation.correlation(fgdata, axis='rows')
        # Layer 4: TS AA (normalized)
        logger.debug("Computing TS AA correlations.")
        ts_aa = correlation.correlation(tsdata, axis='rows')
        logger.debug("Normalizing TS AA correlations.")
        ts_aa_norm = table_normalize(tmp_fg_aa, tmp_ts_aa, ts_aa)
        # NOTE: the four layers (fg_qq, ts_qq_norm, fg_aa, ts_aa_norm) are computed
        # but not merged here; this method currently returns None.
        # return ts_qq_norm
def _save(self, df, path):
df.to_csv(path, sep='\t', index=True, header=True)
def save_essential_similarity(self, path=None):
path = path or os.path.join(self.output, 'cc_ExE.txt')
logger.info("Saving ExE dataset in %s.", path)
self._save(self.ts_sim, path)
def save_nonessential_similarity(self, path=None):
path = path or os.path.join(self.output, 'cc_NxE.txt')
logger.info("Saving ExE dataset in %s.", path)
self._save(self.fg_sim, path)
def main():
import argparse
parser = argparse.ArgumentParser(description='Compute similarity matrix.')
parser.add_argument('scores_file', help='First file with SGA scores. See formats for details')
parser.add_argument('scores_file_2', nargs='?', help='Optional second file with SGA scores. See formats for details')
parser.add_argument('output_folder', help='Where to save the results')
parser.add_argument('-f', '--input-format', dest='input_format',
choices=[Similarity.INPUT_FORMAT_TXT, Similarity.INPUT_FORMAT_BJV],
default=Similarity.INPUT_FORMAT_TXT,
help="""Format of the input files. If format is '%s' then the first file is TS dataset and second file is FG.
If format is '%s' then first file is the .mat workspace snapshot and the second input file should be omitted.""" % (Similarity.INPUT_FORMAT_TXT, Similarity.INPUT_FORMAT_BJV))
parser.add_argument('-m', '--strain-map', dest='strain_map',
help='Strain -> allele mapping file. Used if axis labels are strains and not alleles.'
'This file should have two tab separated columns and no header. Column A is strain id, column B is allele name. '
'ie: "tsq123{tab}bla1-123". '
'This option is mandatory for Ben\'s format.')
parser.add_argument('-l', '--log', dest='loglevel', default='INFO',
help='Log level to use')
parser.add_argument('-e', '--skip-essential', dest='exe', action='store_false',
help='Do not generate ExE correlations')
parser.add_argument('-n', '--skip-nonessential', dest='nxn', action='store_false',
help='Do not generate NxN correlations')
parser.add_argument('-a', '--skip-all', dest='all', action='store_false',
help='Do not generate ALL correlations')
args = parser.parse_args()
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
logging.basicConfig(level=numeric_level, format='%(asctime)s\t%(levelname)s:\t%(message)s')
similarity = Similarity(
args.scores_file,
args.output_folder,
args.scores_file_2,
args.input_format,
args.strain_map)
if args.exe:
similarity.essential_similarity()
similarity.save_essential_similarity()
if args.nxn:
similarity.nonessential_similarity()
similarity.save_nonessential_similarity()
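# Illustrative CLI invocation (the file names below are placeholders, not part of the
# original snippet):
#   python similarity.py ts_scores.txt fg_scores.txt ./results -m strain_to_allele.tsv
# Entry-point guard assumed here so the CLI defined in main() can be executed directly;
# it was not present in the original snippet.
if __name__ == '__main__':
    main()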
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3",
manifest={
"AudioEncoding",
"SpeechModelVariant",
"SsmlVoiceGender",
"OutputAudioEncoding",
"SpeechWordInfo",
"InputAudioConfig",
"VoiceSelectionParams",
"SynthesizeSpeechConfig",
"OutputAudioConfig",
},
)
class AudioEncoding(proto.Enum):
r"""Audio encoding of the audio content sent in the conversational query
request. Refer to the `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
"""
AUDIO_ENCODING_UNSPECIFIED = 0
AUDIO_ENCODING_LINEAR_16 = 1
AUDIO_ENCODING_FLAC = 2
AUDIO_ENCODING_MULAW = 3
AUDIO_ENCODING_AMR = 4
AUDIO_ENCODING_AMR_WB = 5
AUDIO_ENCODING_OGG_OPUS = 6
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7
class SpeechModelVariant(proto.Enum):
r"""Variant of the specified [Speech
model][google.cloud.dialogflow.cx.v3.InputAudioConfig.model] to use.
See the `Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/enhanced-models>`__
for which models have different variants. For example, the
"phone_call" model has both a standard and an enhanced variant. When
you use an enhanced model, you will generally receive higher quality
results than for a standard model.
"""
SPEECH_MODEL_VARIANT_UNSPECIFIED = 0
USE_BEST_AVAILABLE = 1
USE_STANDARD = 2
USE_ENHANCED = 3
class SsmlVoiceGender(proto.Enum):
r"""Gender of the voice as described in `SSML voice
element <https://www.w3.org/TR/speech-synthesis11/#edef_voice>`__.
"""
SSML_VOICE_GENDER_UNSPECIFIED = 0
SSML_VOICE_GENDER_MALE = 1
SSML_VOICE_GENDER_FEMALE = 2
SSML_VOICE_GENDER_NEUTRAL = 3
class OutputAudioEncoding(proto.Enum):
r"""Audio encoding of the output audio format in Text-To-Speech."""
OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0
OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1
OUTPUT_AUDIO_ENCODING_MP3 = 2
OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4
OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3
OUTPUT_AUDIO_ENCODING_MULAW = 5
class SpeechWordInfo(proto.Message):
r"""Information for a word recognized by the speech recognizer.
Attributes:
word (str):
The word this info is for.
start_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the start of the
spoken word. This is an experimental feature and
the accuracy of the time offset can vary.
end_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the end of the spoken
word. This is an experimental feature and the
accuracy of the time offset can vary.
confidence (float):
The Speech confidence between 0.0 and 1.0 for
this word. A higher number indicates an
estimated greater likelihood that the recognized
word is correct. The default of 0.0 is a
sentinel value indicating that confidence was
not set.
This field is not guaranteed to be fully stable
over time for the same audio input. Users should
also not rely on it to always be provided.
"""
word = proto.Field(proto.STRING, number=3,)
start_offset = proto.Field(proto.MESSAGE, number=1, message=duration_pb2.Duration,)
end_offset = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration,)
confidence = proto.Field(proto.FLOAT, number=4,)
class InputAudioConfig(proto.Message):
r"""Instructs the speech recognizer on how to process the audio
content.
Attributes:
audio_encoding (google.cloud.dialogflowcx_v3.types.AudioEncoding):
Required. Audio encoding of the audio content
to process.
sample_rate_hertz (int):
Sample rate (in Hertz) of the audio content sent in the
query. Refer to `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
enable_word_info (bool):
Optional. If ``true``, Dialogflow returns
[SpeechWordInfo][google.cloud.dialogflow.cx.v3.SpeechWordInfo]
in
[StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult]
with information about the recognized speech words, e.g.
start and end time offsets. If false or unspecified, Speech
doesn't return any word-level information.
phrase_hints (Sequence[str]):
Optional. A list of strings containing words and phrases
that the speech recognizer should recognize with higher
likelihood.
See `the Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/basics#phrase-hints>`__
for more details.
model (str):
Optional. Which Speech model to select for the given
request. Select the model best suited to your domain to get
best results. If a model is not explicitly specified, then
we auto-select a model based on the parameters in the
InputAudioConfig. If enhanced speech model is enabled for
the agent and an enhanced version of the specified model for
the language does not exist, then the speech is recognized
using the standard version of the specified model. Refer to
`Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics#select-model>`__
for more details.
model_variant (google.cloud.dialogflowcx_v3.types.SpeechModelVariant):
Optional. Which variant of the [Speech
model][google.cloud.dialogflow.cx.v3.InputAudioConfig.model]
to use.
single_utterance (bool):
Optional. If ``false`` (default), recognition does not cease
until the client closes the stream. If ``true``, the
recognizer will detect a single spoken utterance in input
audio. Recognition ceases when it detects the audio's voice
has stopped or paused. In this case, once a detected intent
is received, the client should close the stream and start a
new request with a new stream as needed. Note: This setting
is relevant only for streaming methods.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="AudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
enable_word_info = proto.Field(proto.BOOL, number=13,)
phrase_hints = proto.RepeatedField(proto.STRING, number=4,)
model = proto.Field(proto.STRING, number=7,)
model_variant = proto.Field(proto.ENUM, number=10, enum="SpeechModelVariant",)
single_utterance = proto.Field(proto.BOOL, number=8,)
class VoiceSelectionParams(proto.Message):
r"""Description of which voice to use for speech synthesis.
Attributes:
name (str):
Optional. The name of the voice. If not set, the service
will choose a voice based on the other parameters such as
language_code and
[ssml_gender][google.cloud.dialogflow.cx.v3.VoiceSelectionParams.ssml_gender].
For the list of available voices, please refer to `Supported
voices and
languages <https://cloud.google.com/text-to-speech/docs/voices>`__.
ssml_gender (google.cloud.dialogflowcx_v3.types.SsmlVoiceGender):
Optional. The preferred gender of the voice. If not set, the
service will choose a voice based on the other parameters
such as language_code and
[name][google.cloud.dialogflow.cx.v3.VoiceSelectionParams.name].
Note that this is only a preference, not requirement. If a
voice of the appropriate gender is not available, the
synthesizer substitutes a voice with a different gender
rather than failing the request.
"""
name = proto.Field(proto.STRING, number=1,)
ssml_gender = proto.Field(proto.ENUM, number=2, enum="SsmlVoiceGender",)
class SynthesizeSpeechConfig(proto.Message):
r"""Configuration of how speech should be synthesized.
Attributes:
speaking_rate (float):
Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0
is the normal native speed supported by the specific voice.
2.0 is twice as fast, and 0.5 is half as fast. If
unset(0.0), defaults to the native 1.0 speed. Any other
values < 0.25 or > 4.0 will return an error.
pitch (float):
Optional. Speaking pitch, in the range [-20.0, 20.0]. 20
means increase 20 semitones from the original pitch. -20
means decrease 20 semitones from the original pitch.
volume_gain_db (float):
Optional. Volume gain (in dB) of the normal native volume
supported by the specific voice, in the range [-96.0, 16.0].
If unset, or set to a value of 0.0 (dB), will play at normal
native signal amplitude. A value of -6.0 (dB) will play at
approximately half the amplitude of the normal native signal
amplitude. A value of +6.0 (dB) will play at approximately
twice the amplitude of the normal native signal amplitude.
We strongly recommend not to exceed +10 (dB) as there's
usually no effective increase in loudness for any value
greater than that.
effects_profile_id (Sequence[str]):
Optional. An identifier which selects 'audio
effects' profiles that are applied on (post
synthesized) text to speech. Effects are applied
on top of each other in the order they are
given.
voice (google.cloud.dialogflowcx_v3.types.VoiceSelectionParams):
Optional. The desired voice of the
synthesized audio.
"""
speaking_rate = proto.Field(proto.DOUBLE, number=1,)
pitch = proto.Field(proto.DOUBLE, number=2,)
volume_gain_db = proto.Field(proto.DOUBLE, number=3,)
effects_profile_id = proto.RepeatedField(proto.STRING, number=5,)
voice = proto.Field(proto.MESSAGE, number=4, message="VoiceSelectionParams",)
class OutputAudioConfig(proto.Message):
r"""Instructs the speech synthesizer how to generate the output
audio content.
Attributes:
audio_encoding (google.cloud.dialogflowcx_v3.types.OutputAudioEncoding):
Required. Audio encoding of the synthesized
audio content.
sample_rate_hertz (int):
Optional. The synthesis sample rate (in
hertz) for this audio. If not provided, then the
synthesizer will use the default sample rate
based on the audio encoding. If this is
different from the voice's natural sample rate,
then the synthesizer will honor this request by
converting to the desired sample rate (which
might result in worse audio quality).
synthesize_speech_config (google.cloud.dialogflowcx_v3.types.SynthesizeSpeechConfig):
Optional. Configuration of how speech should
be synthesized.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="OutputAudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
synthesize_speech_config = proto.Field(
proto.MESSAGE, number=3, message="SynthesizeSpeechConfig",
)
__all__ = tuple(sorted(__protobuf__.manifest))
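# Illustrative construction of the config messages defined above (comments only; the
# field values are arbitrary examples, not library defaults):
#   input_cfg = InputAudioConfig(
#       audio_encoding=AudioEncoding.AUDIO_ENCODING_LINEAR_16,
#       sample_rate_hertz=16000,
#       single_utterance=True,
#   )
#   output_cfg = OutputAudioConfig(
#       audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_OGG_OPUS,
#       synthesize_speech_config=SynthesizeSpeechConfig(speaking_rate=1.0),
#   )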
|
|
"""Provides a set of functions nad clases for different purpose."""
import logging
from logging.handlers import WatchedFileHandler
import socket
import codecs
import time
import re
import os
import yaml
def delegate(attribute_name, method_names):
"""Pass the call to the attribute called attribute_name for every method listed in method_names."""
# hack for python 2.7 as nonlocal is not available
info = {
'attribute': attribute_name,
'methods': method_names
}
def decorator(cls):
"""Decorate class."""
attribute = info['attribute']
if attribute.startswith("__"):
attribute = "_" + cls.__name__ + attribute
for name in info['methods']:
setattr(cls, name, eval("lambda self, *a, **kw: "
"self.{0}.{1}(*a, **kw)".format(attribute, name)))
return cls
return decorator
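# Illustrative use of delegate (comments only; the class and attribute names are
# made-up examples): forwarding selected methods to a wrapped connection object.
#   @delegate("_conn", ["read", "write", "close"])
#   class Session(object):
#       def __init__(self, conn):
#           self._conn = conn
#   # Session(conn).write(data) now forwards to conn.write(data).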
def to_list(item):
"""Convert to list.
If the given item is iterable, this function returns the given item.
If the item is not iterable, this function returns a list with only the
item in it.
@type item: object
@param item: Any object.
@rtype: list
@return: A list with the item in it.
"""
if hasattr(item, '__iter__'):
return item
return [item]
def is_reachable(host, port=23):
"""Check reachability for specified hostname/port.
It tries to open TCP socket.
It supports IPv6.
:param host: hostname or ip address string
:rtype: str
:param port: tcp port number
:rtype: number
:return: True if host is reachable else false
"""
try:
addresses = socket.getaddrinfo(
host, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
except socket.gaierror:
return False
for family, _, _, _, sockaddr in addresses:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.settimeout(5)
try:
sock.connect(sockaddr)
except IOError:
continue
sock.shutdown(socket.SHUT_RDWR)
sock.close()
# Wait 2 sec for socket to shutdown
time.sleep(2)
break
else:
return False
return True
def pattern_to_str(pattern):
"""Convert regex pattern to string.
If pattern is string it returns itself,
if pattern is SRE_Pattern then return pattern attribute
:param pattern: pattern object or string
:return: str: pattern sttring
"""
if isinstance(pattern, str):
return pattern
else:
return pattern.pattern if pattern else None
def levenshtein_distance(str_a, str_b):
"""Calculate the Levenshtein distance between string a and b.
:param str_a: String - input string a
:param str_b: String - input string b
:return: Number - Levenshtein Distance between string a and b
"""
len_a, len_b = len(str_a), len(str_b)
if len_a > len_b:
str_a, str_b = str_b, str_a
len_a, len_b = len_b, len_a
current = range(len_a + 1)
for i in range(1, len_b + 1):
previous, current = current, [i] + [0] * len_a
for j in range(1, len_a + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if str_a[j - 1] != str_b[i - 1]:
                change += 1
current[j] = min(add, delete, change)
return current[len_a]
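# Quick sanity example (comments only): levenshtein_distance("kitten", "sitting") == 3,
# i.e. substitute 'k'->'s', substitute 'e'->'i' and append 'g'.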
def parse_inventory(inventory_output=None):
"""Parse the inventory text and return udi dict."""
udi = {
"name": "",
"description": "",
"pid": "",
"vid": "",
"sn": ""
}
if inventory_output is None:
return udi
# find the record with chassis text in name or descr
capture_next = False
chassis_udi_text = None
for line in inventory_output.split('\n'):
lc_line = line.lower()
        if 'chassis' in lc_line and 'name' in lc_line and 'descr' in lc_line:
capture_next = True
chassis_udi_text = line
continue
if capture_next:
inventory_output = chassis_udi_text + "\n" + line
break
match = re.search(r"(?i)NAME: (?P<name>.*?),? (?i)DESCR", inventory_output, re.MULTILINE)
if match:
udi['name'] = match.group('name').strip('" ,')
match = re.search(r"(?i)DESCR: (?P<description>.*)", inventory_output, re.MULTILINE)
if match:
udi['description'] = match.group('description').strip('" ')
match = re.search(r"(?i)PID: (?P<pid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['pid'] = match.group('pid')
match = re.search(r"(?i)VID: (?P<vid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['vid'] = match.group('vid')
match = re.search(r"(?i)SN: (?P<sn>.*)", inventory_output, re.MULTILINE)
if match:
udi['sn'] = match.group('sn')
return udi
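# Illustrative parse (comments only; the inventory text is a made-up example of the
# "show inventory" style output this parser targets):
#   text = ('NAME: "Chassis", DESCR: "ASR 9006 4 Line Card Slot Chassis"\n'
#           'PID: ASR-9006-AC-V2, VID: V01, SN: FOX1234ABCD')
#   parse_inventory(text) -> {'name': 'Chassis',
#                             'description': 'ASR 9006 4 Line Card Slot Chassis',
#                             'pid': 'ASR-9006-AC-V2', 'vid': 'V01', 'sn': 'FOX1234ABCD'}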
class FilteredFile(object):
"""Delegate class for handling filtered file object."""
__slots__ = ['_file', '_pattern']
def __init__(self, filename, mode="r", encoding=None, pattern=None):
"""Initialize FilteredFile object."""
object.__setattr__(self, '_pattern', pattern)
if encoding is None:
object.__setattr__(self, '_file', open(filename, mode=mode))
else:
object.__setattr__(self, '_file', codecs.open(filename, mode=mode, encoding=encoding))
def __getattr__(self, name):
"""Override standard getattr and delegate to file object."""
return getattr(self._file, name)
def __setattr__(self, name, value):
"""Override standard setattr and delegate to file object."""
setattr(self._file, name, value)
def write(self, text):
"""Override the standard write method to filter the content."""
if self._pattern:
# pattern already compiled no need to check
result = re.search(self._pattern, text)
if result:
for group in result.groups():
if group:
text = text.replace(group, "***")
self._file.write(text)
def __exit__(self, exc_type, exc_val, exc_tb):
"""Custom context manager exit method."""
self._file.close()
def __enter__(self):
"""Custom context manager exit method."""
return self
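# Illustrative filtering behaviour (comments only): with a pattern such as
# re.compile("s?ftp://.*:(.*)@"), writing "ftp://user:secret@host" through a
# FilteredFile replaces the captured password group, so the underlying file
# receives "ftp://user:***@host".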
class FilteredFileHandler(WatchedFileHandler):
"""Class defining custom FileHandler for filtering sensitive information."""
def __init__(self, filename, mode='a', encoding="utf-8", delay=0, pattern=None):
"""Initialize the FilteredFileHandler object."""
self.pattern = pattern
WatchedFileHandler.__init__(self, filename, mode=mode, encoding=encoding, delay=delay)
def _open(self):
return FilteredFile(self.baseFilename, mode=self.mode, encoding=self.encoding, pattern=self.pattern)
def normalize_urls(urls):
"""Overload urls and make list of lists of urls."""
_urls = []
if isinstance(urls, list):
if urls:
if isinstance(urls[0], list):
# multiple connections (list of the lists)
_urls = urls
elif isinstance(urls[0], str):
# single connections (make it list of the lists)
_urls = [urls]
else:
raise RuntimeError("No target host url provided.")
elif isinstance(urls, str):
_urls = [[urls]]
return _urls
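# Illustrative shapes accepted by normalize_urls (comments only):
#   normalize_urls("telnet://10.0.0.1")              -> [["telnet://10.0.0.1"]]
#   normalize_urls(["telnet://a", "telnet://b"])     -> [["telnet://a", "telnet://b"]]
#   normalize_urls([["telnet://a"], ["telnet://b"]]) -> unchanged list of lists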
def make_handler(log_dir, log_level):
"""Make logging handler."""
if log_level > 0:
if log_level == logging.DEBUG:
formatter = logging.Formatter('%(asctime)-15s [%(levelname)8s] %(name)s:'
'%(funcName)s(%(lineno)d): %(message)s')
else:
formatter = logging.Formatter('%(asctime)-15s [%(levelname)8s]: %(message)s')
if log_dir:
# Create the log directory.
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
                except (IOError, OSError):
log_dir = "./"
log_filename = os.path.join(log_dir, 'condoor.log')
# FIXME: take pattern from pattern manager
handler = FilteredFileHandler(log_filename, pattern=re.compile("s?ftp://.*:(.*)@"))
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
else:
handler = logging.NullHandler()
return handler
def yaml_file_to_dict(script_name, path=None):
"""Read yaml file and return the dict.
It assumes the module file exists with the defaults.
If the CONDOOR_{SCRIPT_NAME} env is set then the user file from the env is loaded and merged with the default
There can be user file located in ~/.condoor directory with the {script_name}.yaml filename. If exists
it is merget with default config.
"""
def load_yaml(file_path):
"""Load YAML file from full file path and return dict."""
with open(file_path, 'r') as yamlfile:
try:
                dictionary = yaml.safe_load(yamlfile)
except yaml.YAMLError:
return {}
return dictionary
def merge(user, default):
"""Merge two dicts."""
if isinstance(user, dict) and isinstance(default, dict):
for k, v in default.iteritems():
if k not in user:
user[k] = v
else:
user[k] = merge(user[k], v)
return user
else:
return default
if path is None:
path = os.path.abspath('.')
config_file_path = os.path.join(path, script_name + '.yaml')
if not os.path.exists(config_file_path):
raise RuntimeError('Config file does not exist: {}'.format(config_file_path))
default_dict = load_yaml(config_file_path)
user_config_file_path = os.path.join(os.path.expanduser('~'), '.condoor', script_name + '.yaml')
user_config_file_path = os.getenv('CONDOOR_' + script_name.upper(), user_config_file_path)
if os.path.exists(user_config_file_path):
user_dict = load_yaml(user_config_file_path)
default_dict = merge(user_dict, default_dict)
return default_dict
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is the top-level file to train, evaluate or test your summarization model"""
import sys
import time
import os
import tensorflow as tf
import numpy as np
from collections import namedtuple
from data import Vocab
from batcher import Batcher
from model import SummarizationModel
from decode import BeamSearchDecoder
import util
from tensorflow.python import debug as tf_debug
FLAGS = tf.app.flags.FLAGS
# Where to find data
tf.app.flags.DEFINE_string('data_path', '', 'Path expression to tf.Example datafiles. Can include wildcards to access multiple datafiles.')
tf.app.flags.DEFINE_string('vocab_path', '', 'Path expression to text vocabulary file.')
# Important settings
tf.app.flags.DEFINE_string('mode', 'train', 'must be one of train/eval/decode')
tf.app.flags.DEFINE_boolean('single_pass', False, 'For decode mode only. If True, run eval on the full dataset using a fixed checkpoint, i.e. take the current checkpoint, and use it to produce one summary for each example in the dataset, write the summaries to file and then get ROUGE scores for the whole dataset. If False (default), run concurrent decoding, i.e. repeatedly load latest checkpoint, use it to produce summaries for randomly-chosen examples and log the results to screen, indefinitely.')
# Where to save output
tf.app.flags.DEFINE_string('log_root', '', 'Root directory for all logging.')
tf.app.flags.DEFINE_string('exp_name', '', 'Name for experiment. Logs will be saved in a directory with this name, under log_root.')
# Hyperparameters
tf.app.flags.DEFINE_integer('hidden_dim', 256, 'dimension of RNN hidden states')
tf.app.flags.DEFINE_integer('emb_dim', 128, 'dimension of word embeddings')
tf.app.flags.DEFINE_integer('batch_size', 16, 'minibatch size')
tf.app.flags.DEFINE_integer('max_enc_steps', 400, 'max timesteps of encoder (max source text tokens)')
tf.app.flags.DEFINE_integer('max_dec_steps', 100, 'max timesteps of decoder (max summary tokens)')
tf.app.flags.DEFINE_integer('beam_size', 4, 'beam size for beam search decoding.')
tf.app.flags.DEFINE_integer('min_dec_steps', 35, 'Minimum sequence length of generated summary. Applies only for beam search decoding mode')
tf.app.flags.DEFINE_integer('vocab_size', 50000, 'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.')
tf.app.flags.DEFINE_float('lr', 0.15, 'learning rate')
tf.app.flags.DEFINE_float('adagrad_init_acc', 0.1, 'initial accumulator value for Adagrad')
tf.app.flags.DEFINE_float('rand_unif_init_mag', 0.02, 'magnitude for lstm cells random uniform inititalization')
tf.app.flags.DEFINE_float('trunc_norm_init_std', 1e-4, 'std of trunc norm init, used for initializing everything else')
tf.app.flags.DEFINE_float('max_grad_norm', 2.0, 'for gradient clipping')
# Pointer-generator or baseline model
tf.app.flags.DEFINE_boolean('pointer_gen', True, 'If True, use pointer-generator model. If False, use baseline model.')
# Coverage hyperparameters
tf.app.flags.DEFINE_boolean('coverage', False, 'Use coverage mechanism. Note, the experiments reported in the ACL paper train WITHOUT coverage until converged, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper, turn this off for most of training then turn on for a short phase at the end.')
tf.app.flags.DEFINE_float('cov_loss_wt', 1.0, 'Weight of coverage loss (lambda in the paper). If zero, then no incentive to minimize coverage loss.')
# Utility flags, for restoring and changing checkpoints
tf.app.flags.DEFINE_boolean('convert_to_coverage_model', False, 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on, for the coverage training stage.')
tf.app.flags.DEFINE_boolean('restore_best_model', False, 'Restore the best model in the eval/ dir and save it in the train/ dir, ready to be used for further training. Useful for early stopping, or if your training checkpoint has become corrupted with e.g. NaN values.')
# Debugging. See https://www.tensorflow.org/programmers_guide/debugger
tf.app.flags.DEFINE_boolean('debug', False, "Run in tensorflow's debug mode (watches for NaN/inf values)")
def calc_running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99):
"""Calculate the running average loss via exponential decay.
  This is used to implement early stopping w.r.t. a smoother loss curve than the raw loss curve.
Args:
loss: loss on the most recent eval step
running_avg_loss: running_avg_loss so far
summary_writer: FileWriter object to write for tensorboard
step: training iteration step
decay: rate of exponential decay, a float between 0 and 1. Larger is smoother.
Returns:
running_avg_loss: new running average loss
"""
if running_avg_loss == 0: # on the first iteration just take the loss
running_avg_loss = loss
else:
running_avg_loss = running_avg_loss * decay + (1 - decay) * loss
running_avg_loss = min(running_avg_loss, 12) # clip
loss_sum = tf.Summary()
tag_name = 'running_avg_loss/decay=%f' % (decay)
loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)
summary_writer.add_summary(loss_sum, step)
tf.logging.info('running_avg_loss: %f', running_avg_loss)
return running_avg_loss
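# Worked example (illustrative numbers): with decay=0.99, a running average of 4.0 and a
# new eval loss of 3.0 give 4.0 * 0.99 + (1 - 0.99) * 3.0 = 3.96 + 0.03 = 3.99, so the
# average moves only 1% of the way toward each new loss; it is also clipped at 12.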
def restore_best_model():
"""Load bestmodel file from eval directory, add variables for adagrad, and save to train directory"""
tf.logging.info("Restoring bestmodel for training...")
# Initialize all vars in the model
sess = tf.Session(config=util.get_config())
print "Initializing all variables..."
sess.run(tf.initialize_all_variables())
# Restore the best model from eval dir
saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
print "Restoring all non-adagrad variables from best model in eval dir..."
curr_ckpt = util.load_ckpt(saver, sess, "eval")
print "Restored %s." % curr_ckpt
# Save this model to train dir and quit
new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
print "Saving model to %s..." % (new_fname)
new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables
new_saver.save(sess, new_fname)
print "Saved."
exit()
def convert_to_coverage_model():
"""Load non-coverage checkpoint, add initialized extra variables for coverage, and save as new checkpoint"""
tf.logging.info("converting non-coverage model to coverage model..")
# initialize an entire coverage model from scratch
sess = tf.Session(config=util.get_config())
print "initializing everything..."
sess.run(tf.global_variables_initializer())
# load all non-coverage weights from checkpoint
saver = tf.train.Saver([v for v in tf.global_variables() if "coverage" not in v.name and "Adagrad" not in v.name])
print "restoring non-coverage variables..."
curr_ckpt = util.load_ckpt(saver, sess)
print "restored."
# save this model and quit
new_fname = curr_ckpt + '_cov_init'
print "saving model to %s..." % (new_fname)
new_saver = tf.train.Saver() # this one will save all variables that now exist
new_saver.save(sess, new_fname)
print "saved."
exit()
def setup_training(model, batcher):
"""Does setup before starting training (run_training)"""
train_dir = os.path.join(FLAGS.log_root, "train")
if not os.path.exists(train_dir): os.makedirs(train_dir)
model.build_graph() # build the graph
if FLAGS.convert_to_coverage_model:
assert FLAGS.coverage, "To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True"
convert_to_coverage_model()
if FLAGS.restore_best_model:
restore_best_model()
saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time
sv = tf.train.Supervisor(logdir=train_dir,
is_chief=True,
saver=saver,
summary_op=None,
save_summaries_secs=60, # save summaries for tensorboard every 60 secs
save_model_secs=60, # checkpoint every 60 secs
global_step=model.global_step)
summary_writer = sv.summary_writer
tf.logging.info("Preparing or waiting for session...")
sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())
tf.logging.info("Created session.")
try:
run_training(model, batcher, sess_context_manager, sv, summary_writer) # this is an infinite loop until interrupted
except KeyboardInterrupt:
tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...")
sv.stop()
def run_training(model, batcher, sess_context_manager, sv, summary_writer):
"""Repeatedly runs training iterations, logging loss to screen and writing summaries"""
tf.logging.info("starting run_training")
with sess_context_manager as sess:
if FLAGS.debug: # start the tensorflow debugger
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
while True: # repeats until interrupted
batch = batcher.next_batch()
tf.logging.info('running training step...')
t0=time.time()
results = model.run_train_step(sess, batch)
t1=time.time()
tf.logging.info('seconds for training step: %.3f', t1-t0)
loss = results['loss']
tf.logging.info('loss: %f', loss) # print the loss to screen
if not np.isfinite(loss):
raise Exception("Loss is not finite. Stopping.")
if FLAGS.coverage:
coverage_loss = results['coverage_loss']
tf.logging.info("coverage_loss: %f", coverage_loss) # print the coverage loss to screen
# get the summaries and iteration number so we can write summaries to tensorboard
summaries = results['summaries'] # we will write these summaries to tensorboard using summary_writer
train_step = results['global_step'] # we need this to update our running average loss
summary_writer.add_summary(summaries, train_step) # write the summaries
if train_step % 100 == 0: # flush the summary writer every so often
summary_writer.flush()
def run_eval(model, batcher, vocab):
"""Repeatedly runs eval iterations, logging to screen and writing summaries. Saves the model with the best loss seen so far."""
model.build_graph() # build the graph
saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time
sess = tf.Session(config=util.get_config())
eval_dir = os.path.join(FLAGS.log_root, "eval") # make a subdir of the root dir for eval data
bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved
summary_writer = tf.summary.FileWriter(eval_dir)
running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping
best_loss = None # will hold the best loss achieved so far
while True:
_ = util.load_ckpt(saver, sess) # load a new checkpoint
batch = batcher.next_batch() # get the next batch
# run eval on the batch
t0=time.time()
results = model.run_eval_step(sess, batch)
t1=time.time()
tf.logging.info('seconds for batch: %.2f', t1-t0)
# print the loss and coverage loss to screen
loss = results['loss']
tf.logging.info('loss: %f', loss)
if FLAGS.coverage:
coverage_loss = results['coverage_loss']
tf.logging.info("coverage_loss: %f", coverage_loss)
# add summaries
summaries = results['summaries']
train_step = results['global_step']
summary_writer.add_summary(summaries, train_step)
# calculate running avg loss
running_avg_loss = calc_running_avg_loss(np.asscalar(loss), running_avg_loss, summary_writer, train_step)
# If running_avg_loss is best so far, save this checkpoint (early stopping).
# These checkpoints will appear as bestmodel-<iteration_number> in the eval dir
if best_loss is None or running_avg_loss < best_loss:
tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)
saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')
best_loss = running_avg_loss
# flush the summary writer every so often
if train_step % 100 == 0:
summary_writer.flush()
def main(unused_argv):
if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
raise Exception("Problem with flags: %s" % unused_argv)
tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))
# Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
if not os.path.exists(FLAGS.log_root):
if FLAGS.mode=="train":
os.makedirs(FLAGS.log_root)
else:
raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))
vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary
# If in decode mode, set batch_size = beam_size
# Reason: in decode mode, we decode one example at a time.
# On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
if FLAGS.mode == 'decode':
FLAGS.batch_size = FLAGS.beam_size
# If single_pass=True, check we're in decode mode
if FLAGS.single_pass and FLAGS.mode!='decode':
raise Exception("The single_pass flag should only be True in decode mode")
# Make a namedtuple hps, containing the values of the hyperparameters that the model needs
hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
hps_dict = {}
for key,val in FLAGS.__flags.iteritems(): # for each flag
if key in hparam_list: # if it's in the list
hps_dict[key] = val # add it to the dict
hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
# Create a batcher object that will create minibatches of data
batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)
tf.set_random_seed(111) # a seed value for randomness
if hps.mode == 'train':
print "creating model..."
model = SummarizationModel(hps, vocab)
setup_training(model, batcher)
elif hps.mode == 'eval':
model = SummarizationModel(hps, vocab)
run_eval(model, batcher, vocab)
elif hps.mode == 'decode':
decode_model_hps = hps # This will be the hyperparameters for the decoder model
decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
model = SummarizationModel(decode_model_hps, vocab)
decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
else:
raise ValueError("The 'mode' flag must be one of train/eval/decode")
if __name__ == '__main__':
tf.app.run()
|
|
from os import path
import pyteomics
import pickle
#pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
import random
from pyteomics import auxiliary, mass
from pyteomics import cmass, cparser
cmass.nist_mass = mass.nist_mass
import gzip
class MassTest(unittest.TestCase):
def setUp(self):
self.mass_data = {
'A' : {0: (1.0, 1.0),
1: (1.0, 0.5),
2: (2.0, 0.5)},
'B' : {0: (2.0, 1.0),
2: (2.0, 0.5),
3: (3.0, 0.5)},
'C' : {0: (3.0, 1.0),
3: (3.0, 0.5),
4: (4.0, 0.5)},
'D' : {0: (4.0, 1.0),
4: (4.0, 0.5),
5: (5.0, 0.5)},
'E' : {0: (5.0, 1.0),
5: (5.0, 0.5),
6: (6.0, 0.5)},
'F' : {0: (6.0, 1.0),
6: (6.0, 0.7),
7: (7.0, 0.3)},
'H+': {0: (5.0, 1.0),
5: (5.0, 1.0)},
}
self.mass_H = cmass.nist_mass['H'][0][0]
self.mass_O = cmass.nist_mass['O'][0][0]
self.test_aa_mass = {'X': 1.0, 'Y': 2.0, 'Z': 3.0}
self.random_peptides = [
''.join([random.choice('XYZ') for i in range(20)])
for i in range(10)]
self.aa_comp = {'X': cmass.Composition({'A': 1},
mass_data=self.mass_data),
'Y': cmass.Composition({'B': 1},
mass_data=self.mass_data),
'Z': cmass.Composition({'C': 1},
mass_data=self.mass_data),
'H-': cmass.Composition({'D': 1},
mass_data=self.mass_data),
'-OH': cmass.Composition({'E': 1},
mass_data=self.mass_data),
}
self.ion_comp = {'M': cmass.Composition({},
mass_data=self.mass_data),
'a': cmass.Composition({'A': -1},
mass_data=self.mass_data)}
self.mods = {'a': cmass.Composition(A=1),
'b': cmass.Composition(B=1)}
self.d = {atom: 1 for atom in 'ABCDE'}
def test_fast_mass(self):
for pep in self.random_peptides:
self.assertAlmostEqual(
cmass.fast_mass(pep, aa_mass=self.test_aa_mass),
sum(pep.count(aa) * m
for aa, m in self.test_aa_mass.items())
+ self.mass_H * 2.0 + self.mass_O)
def test_fast_mass2(self):
for pep in self.random_peptides:
self.assertAlmostEqual(
cmass.fast_mass2(pep, aa_mass=self.test_aa_mass),
sum(pep.count(aa) * m
for aa, m in self.test_aa_mass.items())
+ self.mass_H * 2.0 + self.mass_O)
def test_Composition_dict(self):
# Test Composition from a dict.
self.assertEqual(
cmass.Composition(self.d, mass_data=self.mass_data), self.d)
def test_Composition_formula(self):
# Test Composition from a formula.
self.assertEqual(self.d,
cmass.Composition(formula='ABCDE',
mass_data={atom: {0: (1.0, 1.0)} for atom in 'ABCDE'}))
def test_Composition_seq(self):
# Test Composition from a sequence.
self.assertEqual(self.d,
cmass.Composition(sequence='XYZ', aa_comp=self.aa_comp))
def test_Composition_pseq(self):
# Test Composition from a parsed sequence.
self.assertEqual(
cmass.Composition(parsed_sequence=['X', 'Y', 'Z'],
aa_comp=self.aa_comp),
{atom: 1 for atom in 'ABC'})
def test_Composition_sseq(self):
# Test Composition from a split sequence.
self.assertEqual(
cmass.Composition(split_sequence=[('X',), ('Y',), ('Z',)],
aa_comp=self.aa_comp),
{atom: 1 for atom in 'ABC'})
def test_Composition_sum(self):
# Test sum of Composition objects.
self.assertEqual(
cmass.Composition(sequence='XXY', aa_comp=self.aa_comp)
+ cmass.Composition(sequence='YZZ', aa_comp=self.aa_comp),
{atom: 2 for atom in 'ABCDE'})
def test_Composition_sub(self):
# Test subtraction of Composition objects
self.assertEqual({}
- cmass.Composition(sequence='XYZ', aa_comp=self.aa_comp),
{atom: -1 for atom in 'ABCDE'})
def test_Composition_mul(self):
# Test multiplication of Composition by integers
self.assertEqual(
2 * cmass.Composition(sequence='XYZ', aa_comp=self.aa_comp),
{atom: 2 for atom in 'ABCDE'})
self.assertEqual(
cmass.Composition(sequence='XYZ', aa_comp=self.aa_comp) * 2,
{atom: 2 for atom in 'ABCDE'})
def test_Composition_positional(self):
# Test creation from positional args
ac = self.aa_comp.copy()
ac.update(self.mods)
self.assertEqual(cmass.Composition('aXbYZ', aa_comp=ac),
{'A': 2, 'B': 2, 'C': 1, 'D': 1, 'E': 1})
self.assertEqual(cmass.Composition('AB2C3', mass_data=self.mass_data),
{'A': 1, 'B': 2, 'C': 3})
def test_calculate_mass(self):
# Calculate mass by a formula.
self.assertEqual(
cmass.calculate_mass(formula='ABCDE', mass_data=self.mass_data),
sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))
# Calculate mass by a sequence.
self.assertEqual(
cmass.calculate_mass(sequence='XYZ',
aa_comp=self.aa_comp,
mass_data=self.mass_data),
sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))
# Calculate mass by a parsed sequence.
self.assertEqual(
cmass.calculate_mass(parsed_sequence=['H-','X','Y','Z','-OH'],
aa_comp=self.aa_comp,
mass_data=self.mass_data),
sum([self.mass_data[atom][0][0] for atom in 'ABCDE']))
# Calculate average mass by a formula.
self.assertEqual(
cmass.calculate_mass(formula='ABCDE',
average=True,
mass_data=self.mass_data),
sum([self.mass_data[atom][isotope][0]
* self.mass_data[atom][isotope][1]
for atom in 'ABCDE'
for isotope in self.mass_data[atom] if isotope != 0]))
# Calculate m/z of an ion.
for charge in [1,2,3]:
self.assertEqual(
cmass.calculate_mass(formula='ABCDE',
ion_type='M',
charge=charge,
mass_data=self.mass_data),
cmass.calculate_mass(formula='ABCDE'+'H+%d' % (charge,),
mass_data=self.mass_data))
self.assertEqual(
cmass.calculate_mass(formula='ABCDE',
ion_type='M',
charge=charge,
mass_data=self.mass_data),
(cmass.calculate_mass(formula='ABCDE',
mass_data=self.mass_data)
+ self.mass_data['H+'][0][0] * charge
) / charge)
self.assertRaises(
auxiliary.PyteomicsError,
cmass.calculate_mass,
**{'formula': 'ABCDEH+%d' % charge,
'ion_type': 'M',
'charge': charge,
'mass_data': self.mass_data})
# Sanity check.
for pep in self.random_peptides:
self.assertEqual(cmass.calculate_mass(
sequence=pep, aa_comp=self.aa_comp, mass_data=self.mass_data,
ion_comp=self.ion_comp),
cmass.calculate_mass(
parsed_sequence=cparser.parse(
pep, labels=['X', 'Y', 'Z'], show_unmodified_termini=True),
aa_comp=self.aa_comp, mass_data=self.mass_data,
ion_comp=self.ion_comp))
def test_composition_objects_are_pickleable(self):
dict_ = cmass.Composition(self.d, mass_data=self.mass_data)
formula = cmass.Composition(formula='ABCDE',
mass_data={atom: {0: (1.0, 1.0)} for atom in 'ABCDE'})
sequence = cmass.Composition(sequence='XYZ', aa_comp=self.aa_comp)
parsed_sequence = cmass.Composition(parsed_sequence=['X', 'Y', 'Z'],
aa_comp=self.aa_comp)
split_sequence = cmass.Composition(split_sequence=[('X',), ('Y',), ('Z',)],
aa_comp=self.aa_comp)
self.assertEqual(dict_, pickle.loads(pickle.dumps(dict_)))
self.assertEqual(formula, pickle.loads(pickle.dumps(formula)))
self.assertEqual(sequence, pickle.loads(pickle.dumps(sequence)))
self.assertEqual(parsed_sequence, pickle.loads(pickle.dumps(parsed_sequence)))
self.assertEqual(split_sequence, pickle.loads(pickle.dumps(split_sequence)))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import re
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE
LOG = logging.getLogger(__name__)
class StorwizeSSH(object):
"""SSH interface to IBM Storwize family and SVC storage systems."""
def __init__(self, run_ssh):
self._ssh = run_ssh
def _run_ssh(self, ssh_cmd):
try:
return self._ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s') %
{'cmd': ssh_cmd,
'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
"""Run an SSH command and return parsed output."""
raw = self._run_ssh(ssh_cmd)
return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
with_header=with_header)
def run_ssh_assert_no_output(self, ssh_cmd):
"""Run an SSH command and assert no output returned."""
out, err = self._run_ssh(ssh_cmd)
if len(out.strip()) != 0:
msg = (_('Expected no output from CLI command %(cmd)s, '
'got %(out)s') % {'cmd': ' '.join(ssh_cmd), 'out': out})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_check_created(self, ssh_cmd):
"""Run an SSH command and return the ID of the created object."""
out, err = self._run_ssh(ssh_cmd)
try:
match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
return match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsnode(self, node_id=None):
with_header = True
ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
if node_id:
with_header = False
ssh_cmd.append(node_id)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lslicense(self):
ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lssystem(self):
ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lsmdiskgrp(self, pool):
ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
'"%s"' % pool]
return self.run_ssh_info(ssh_cmd)[0]
def lsiogrp(self):
ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsportip(self):
ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
@staticmethod
def _create_port_arg(port_type, port_name):
if port_type == 'initiator':
port = ['-iscsiname']
else:
port = ['-hbawwpn']
port.append(port_name)
return port
def mkhost(self, host_name, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'mkhost', '-force'] + port
ssh_cmd += ['-name', '"%s"' % host_name]
return self.run_ssh_check_created(ssh_cmd)
def addhostport(self, host, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lshost(self, host=None):
with_header = True
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
if host:
with_header = False
ssh_cmd.append('"%s"' % host)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def add_chap_secret(self, secret, host):
ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lsiscsiauth(self):
ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfabric(self, wwpn=None, host=None):
ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
if wwpn:
ssh_cmd.extend(['-wwpn', wwpn])
elif host:
ssh_cmd.extend(['-host', '"%s"' % host])
else:
msg = (_('Must pass wwpn or host to lsfabric.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return self.run_ssh_info(ssh_cmd, with_header=True)
def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
"""Map vdisk to host.
If vdisk already mapped and multihostmap is True, use the force flag.
"""
ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host,
'-scsi', lun, vdisk]
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if 'successfully created' in out:
return
if not err:
msg = (_('Did not find success message nor error for %(fun)s: '
'%(out)s') % {'out': out, 'fun': ssh_cmd})
raise exception.VolumeBackendAPIException(data=msg)
if err.startswith('CMMVC6071E'):
if not multihostmap:
LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
'to False, not allowing multi host mapping.'))
msg = 'CMMVC6071E The VDisk-to-host mapping '\
'was not created because the VDisk is '\
'already mapped to a host.\n"'
raise exception.VolumeDriverException(message=msg)
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
return self.run_ssh_check_created(ssh_cmd)
def rmvdiskhostmap(self, host, vdisk):
ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskhostmap(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lshostvdiskmap(self, host):
ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
return self.run_ssh_info(ssh_cmd, with_header=True)
def rmhost(self, host):
ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def mkvdisk(self, name, size, units, pool, opts, params):
ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
'"%s"' % pool, '-iogrp', str(opts['iogrp']), '-size',
size, '-unit', units] + params
return self.run_ssh_check_created(ssh_cmd)
def rmvdisk(self, vdisk, force=True):
ssh_cmd = ['svctask', 'rmvdisk']
if force:
ssh_cmd += ['-force']
ssh_cmd += [vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdisk(self, vdisk):
"""Return vdisk attributes or None if it doesn't exist."""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk]
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if not len(err):
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)[0]
if err.startswith('CMMVC5754E'):
return None
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsvdisks_from_filter(self, filter_name, value):
"""Performs an lsvdisk command, filtering the results as specified.
Returns an iterable for all matching vdisks.
"""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
'-filtervalue', '%s=%s' % (filter_name, value)]
return self.run_ssh_info(ssh_cmd, with_header=True)
def chvdisk(self, vdisk, params):
ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def movevdisk(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def expandvdisksize(self, vdisk, amount):
ssh_cmd = (['svctask', 'expandvdisksize', '-size', str(amount),
'-unit', 'gb', vdisk])
self.run_ssh_assert_no_output(ssh_cmd)
def mkfcmap(self, source, target, full_copy, consistgrp=None):
ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target',
target, '-autodelete']
if not full_copy:
ssh_cmd.extend(['-copyrate', '0'])
if consistgrp:
ssh_cmd.extend(['-consistgrp', consistgrp])
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if 'successfully created' not in out:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
'successfully created', out)
fc_map_id = match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return fc_map_id
def prestartfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def prestartfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
'-autodelete', autodel, fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def rmfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskfcmappings(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcmap(self, fc_map_id):
ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
'id=%s' % fc_map_id, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcconsistgrp(self, fc_consistgrp):
ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
out, err = self._ssh(ssh_cmd)
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)
def mkfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
return self.run_ssh_check_created(ssh_cmd)
def rmfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
return self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskcopy(self, vdisk, dest_pool, params):
ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
'"%s"' % dest_pool, vdisk])
return self.run_ssh_check_created(ssh_cmd)
def lsvdiskcopy(self, vdisk, copy_id=None):
ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
with_header = True
if copy_id:
ssh_cmd += ['-copy', copy_id]
with_header = False
ssh_cmd += [vdisk]
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lsvdisksyncprogress(self, vdisk, copy_id):
ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
'-copy', copy_id, vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)[0]
def rmvdiskcopy(self, vdisk, copy_id):
ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def rmvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
class CLIResponse(object):
'''Parse SVC CLI output and generate iterable.'''
def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
super(CLIResponse, self).__init__()
if ssh_cmd:
self.ssh_cmd = ' '.join(ssh_cmd)
else:
self.ssh_cmd = 'None'
self.raw = raw
self.delim = delim
self.with_header = with_header
self.result = self._parse()
def select(self, *keys):
for a in self.result:
vs = []
for k in keys:
v = a.get(k, None)
if isinstance(v, basestring) or v is None:
v = [v]
if isinstance(v, list):
vs.append(v)
for item in zip(*vs):
if len(item) == 1:
yield item[0]
else:
yield item
def __getitem__(self, key):
try:
return self.result[key]
except KeyError:
msg = (_('Did not find expected key %(key)s in %(fun)s: %(raw)s') %
{'key': key, 'fun': self.ssh_cmd, 'raw': self.raw})
raise exception.VolumeBackendAPIException(data=msg)
def __iter__(self):
for a in self.result:
yield a
def __len__(self):
return len(self.result)
def _parse(self):
def get_reader(content, delim):
for line in content.lstrip().splitlines():
line = line.strip()
if line:
yield line.split(delim)
else:
yield []
if isinstance(self.raw, basestring):
stdout, stderr = self.raw, ''
else:
stdout, stderr = self.raw
reader = get_reader(stdout, self.delim)
result = []
if self.with_header:
hds = tuple()
for row in reader:
hds = row
break
for row in reader:
cur = dict()
if len(hds) != len(row):
msg = (_('Unexpected CLI response: header/row mismatch. '
'header: %(header)s, row: %(row)s')
% {'header': hds,
'row': row})
raise exception.VolumeBackendAPIException(data=msg)
for k, v in zip(hds, row):
CLIResponse.append_dict(cur, k, v)
result.append(cur)
else:
cur = dict()
for row in reader:
if row:
CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
elif cur: # start new section
result.append(cur)
cur = dict()
if cur:
result.append(cur)
return result
@staticmethod
def append_dict(dict_, key, value):
key, value = key.strip(), value.strip()
obj = dict_.get(key, None)
if obj is None:
dict_[key] = value
elif isinstance(obj, list):
obj.append(value)
dict_[key] = obj
else:
dict_[key] = [obj, value]
return dict_
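# --- Illustrative usage (a sketch, not part of the driver; the sample output
# below is invented, and this module targets Python 2 since `basestring` is
# used in _parse) ---
if __name__ == '__main__':
    sample = 'id!name!capacity\n0!vdisk0!10GB\n1!vdisk1!20GB'
    resp = CLIResponse((sample, ''), ssh_cmd=['svcinfo', 'lsvdisk'],
                       delim='!', with_header=True)
    for row in resp:
        # each row is a dict keyed by the header fields
        print('%s %s %s' % (row['id'], row['name'], row['capacity']))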
|
|
""" Access and control log capturing. """
import logging
import re
from contextlib import contextmanager
import py
import pytest
from _pytest.compat import nullcontext
from _pytest.config import create_terminal_writer
from _pytest.pathlib import Path
DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m")
def _remove_ansi_escape_sequences(text):
return _ANSI_ESCAPE_SEQ.sub("", text)
class ColoredLevelFormatter(logging.Formatter):
"""
Colorize the %(levelname)..s part of the log format passed to __init__.
"""
LOGLEVEL_COLOROPTS = {
logging.CRITICAL: {"red"},
logging.ERROR: {"red", "bold"},
logging.WARNING: {"yellow"},
logging.WARN: {"yellow"},
logging.INFO: {"green"},
logging.DEBUG: {"purple"},
logging.NOTSET: set(),
}
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)")
def __init__(self, terminalwriter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._original_fmt = self._style._fmt
self._level_to_fmt_mapping = {}
levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
if not levelname_fmt_match:
return
levelname_fmt = levelname_fmt_match.group()
for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
formatted_levelname = levelname_fmt % {
"levelname": logging.getLevelName(level)
}
# add ANSI escape sequences around the formatted levelname
color_kwargs = {name: True for name in color_opts}
colorized_formatted_levelname = terminalwriter.markup(
formatted_levelname, **color_kwargs
)
self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
colorized_formatted_levelname, self._fmt
)
def format(self, record):
fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
self._style._fmt = fmt
return super().format(record)
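# A quick interpreter sketch (not part of the plugin) of what
# LEVELNAME_FMT_REGEX captures -- the levelname placeholder including any
# padding/precision modifier:
#
#   >>> ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
#   ...     "%(levelname)-8s %(name)s %(message)s").group()
#   '%(levelname)-8s'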
class PercentStyleMultiline(logging.PercentStyle):
"""A logging style with special support for multiline messages.
If the message of a record consists of multiple lines, this style
formats the message as if each line were logged separately.
"""
@staticmethod
def _update_message(record_dict, message):
tmp = record_dict.copy()
tmp["message"] = message
return tmp
def format(self, record):
if "\n" in record.message:
lines = record.message.splitlines()
formatted = self._fmt % self._update_message(record.__dict__, lines[0])
# TODO optimize this by introducing an option that tells the
# logging framework that the indentation doesn't
# change. This allows to compute the indentation only once.
indentation = _remove_ansi_escape_sequences(formatted).find(lines[0])
lines[0] = formatted
return ("\n" + " " * indentation).join(lines)
else:
return self._fmt % record.__dict__
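# Behavior sketch (values invented): with the format "LEVEL | %(message)s", a
# record whose message is "first\nsecond" renders roughly as
#
#   LEVEL | first
#           second
#
# i.e. continuation lines are indented so they align under the first line,
# with ANSI escape sequences excluded from the indentation measurement.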
def get_option_ini(config, *names):
for name in names:
ret = config.getoption(name) # 'default' arg won't work as expected
if ret is None:
ret = config.getini(name)
if ret:
return ret
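# For example, get_option_ini(config, "log_cli_format", "log_format") returns
# the first of --log-cli-format, the log_cli_format ini value, --log-format,
# or the log_format ini value that is set to a non-empty value (None if none
# are set).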
def pytest_addoption(parser):
"""Add options to control log capturing."""
group = parser.getgroup("logging")
def add_option_ini(option, dest, default=None, type=None, **kwargs):
parser.addini(
dest, default=default, type=type, help="default value for " + option
)
group.addoption(option, dest=dest, **kwargs)
add_option_ini(
"--no-print-logs",
dest="log_print",
action="store_const",
const=False,
default=True,
type="bool",
help="disable printing caught logs on failed tests.",
)
add_option_ini(
"--log-level",
dest="log_level",
default=None,
help="logging level used by the logging module",
)
add_option_ini(
"--log-format",
dest="log_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-date-format",
dest="log_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
parser.addini(
"log_cli",
default=False,
type="bool",
help='enable log display during test run (also known as "live logging").',
)
add_option_ini(
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
)
add_option_ini(
"--log-cli-format",
dest="log_cli_format",
default=None,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-cli-date-format",
dest="log_cli_date_format",
default=None,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-file",
dest="log_file",
default=None,
help="path to a file when logging will be written to.",
)
add_option_ini(
"--log-file-level",
dest="log_file_level",
default=None,
help="log file logging level.",
)
add_option_ini(
"--log-file-format",
dest="log_file_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-file-date-format",
dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
@contextmanager
def catching_logs(handler, formatter=None, level=None):
"""Context manager that prepares the whole logging machinery properly."""
root_logger = logging.getLogger()
if formatter is not None:
handler.setFormatter(formatter)
if level is not None:
handler.setLevel(level)
# Adding the same handler twice would confuse logging system.
# Just don't do that.
add_new_handler = handler not in root_logger.handlers
if add_new_handler:
root_logger.addHandler(handler)
if level is not None:
orig_level = root_logger.level
root_logger.setLevel(min(orig_level, level))
try:
yield handler
finally:
if level is not None:
root_logger.setLevel(orig_level)
if add_new_handler:
root_logger.removeHandler(handler)
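# Usage sketch (hypothetical caller): temporarily attach a capturing handler
# to the root logger, lowering its level if needed, and detach it afterwards:
#
#   with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as handler:
#       logging.getLogger("my.logger").debug("captured")
#   # handler.records now holds the emitted LogRecord objects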
class LogCaptureHandler(logging.StreamHandler):
"""A logging handler that stores log records and the log text."""
def __init__(self):
"""Creates a new log handler."""
logging.StreamHandler.__init__(self, py.io.TextIO())
self.records = []
def emit(self, record):
"""Keep the log records in a list in addition to the log text."""
self.records.append(record)
logging.StreamHandler.emit(self, record)
def reset(self):
self.records = []
self.stream = py.io.TextIO()
class LogCaptureFixture:
"""Provides access and control of log capturing."""
def __init__(self, item):
"""Creates a new funcarg."""
self._item = item
# dict of log name -> log level
self._initial_log_levels = {} # Dict[str, int]
def _finalize(self):
"""Finalizes the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
# restore log levels
for logger_name, level in self._initial_log_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
@property
def handler(self):
"""
:rtype: LogCaptureHandler
"""
return self._item.catch_log_handler
def get_records(self, when):
"""
Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:rtype: List[logging.LogRecord]
:return: the list of captured records at the given stage
.. versionadded:: 3.4
"""
handler = self._item.catch_log_handlers.get(when)
if handler:
return handler.records
else:
return []
@property
def text(self):
"""Returns the formatted log text."""
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
@property
def records(self):
"""Returns the list of log records."""
return self.handler.records
@property
def record_tuples(self):
"""Returns a list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
@property
def messages(self):
"""Returns a list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
are all interpolated.
Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
levels, timestamps, etc, making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
to the logging functions) is not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
return [r.getMessage() for r in self.records]
def clear(self):
"""Reset the list of log records and the captured log text."""
self.handler.reset()
def set_level(self, level, logger=None):
"""Sets the level for capturing of logs. The level will be restored to its previous value at the end of
the test.
        :param int level: the level to set on the logger.
:param str logger: the logger to update the level. If not given, the root logger level is updated.
.. versionchanged:: 3.4
The levels of the loggers changed by this function will be restored to their initial values at the
end of the test.
"""
logger_name = logger
logger = logging.getLogger(logger_name)
# save the original log-level to restore it during teardown
self._initial_log_levels.setdefault(logger_name, logger.level)
logger.setLevel(level)
@contextmanager
def at_level(self, level, logger=None):
"""Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
level is restored to its original value.
        :param int level: the level to set on the logger.
:param str logger: the logger to update the level. If not given, the root logger level is updated.
"""
logger = logging.getLogger(logger)
orig_level = logger.level
logger.setLevel(level)
try:
yield
finally:
logger.setLevel(orig_level)
@pytest.fixture
def caplog(request):
"""Access and control log capturing.
Captured logs are available through the following properties/methods::
* caplog.messages -> list of format-interpolated log messages
* caplog.text -> string containing formatted log output
* caplog.records -> list of logging.LogRecord instances
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
"""
result = LogCaptureFixture(request.node)
yield result
result._finalize()
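# Example of user-side usage (a sketch; the test function name and logger
# below are hypothetical):
#
#   def test_warning_is_logged(caplog):
#       logging.getLogger("app").warning("disk %s full", "sda1")
#       assert caplog.record_tuples == [("app", logging.WARNING, "disk sda1 full")]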
def get_actual_log_level(config, *setting_names):
"""Return the actual logging level."""
for setting_name in setting_names:
log_level = config.getoption(setting_name)
if log_level is None:
log_level = config.getini(setting_name)
if log_level:
break
else:
return
if isinstance(log_level, str):
log_level = log_level.upper()
try:
return int(getattr(logging, log_level, log_level))
except ValueError:
# Python logging does not recognise this as a logging level
raise pytest.UsageError(
"'{}' is not recognized as a logging level name for "
"'{}'. Please consider passing the "
"logging level num instead.".format(log_level, setting_name)
)
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
class LoggingPlugin:
"""Attaches to the logging module and captures log messages for each test.
"""
def __init__(self, config):
"""Creates a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
self.print_logs = get_option_ini(config, "log_print")
self.formatter = self._create_formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
)
self.log_level = get_actual_log_level(config, "log_level")
self.log_file_level = get_actual_log_level(config, "log_file_level")
self.log_file_format = get_option_ini(config, "log_file_format", "log_format")
self.log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
self.log_file_formatter = logging.Formatter(
self.log_file_format, datefmt=self.log_file_date_format
)
log_file = get_option_ini(config, "log_file")
if log_file:
self.log_file_handler = logging.FileHandler(
log_file, mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
else:
self.log_file_handler = None
self.log_cli_handler = None
self.live_logs_context = lambda: nullcontext()
# Note that the lambda for the live_logs_context is needed because
# live_logs_context can otherwise not be entered multiple times due
# to limitations of contextlib.contextmanager.
if self._log_cli_enabled():
self._setup_cli_logging()
def _create_formatter(self, log_format, log_date_format):
# color option doesn't exist if terminal plugin is disabled
color = getattr(self._config.option, "color", "no")
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
log_format
):
formatter = ColoredLevelFormatter(
create_terminal_writer(self._config), log_format, log_date_format
)
else:
formatter = logging.Formatter(log_format, log_date_format)
formatter._style = PercentStyleMultiline(formatter._style._fmt)
return formatter
def _setup_cli_logging(self):
config = self._config
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
if terminal_reporter is None:
# terminal reporter is disabled e.g. by pytest-xdist.
return
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
log_cli_formatter = self._create_formatter(
get_option_ini(config, "log_cli_format", "log_format"),
get_option_ini(config, "log_cli_date_format", "log_date_format"),
)
log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level")
self.log_cli_handler = log_cli_handler
self.live_logs_context = lambda: catching_logs(
log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
)
def set_log_path(self, fname):
"""Public method, which can set filename parameter for
Logging.FileHandler(). Also creates parent directory if
it does not exist.
.. warning::
            Please consider this an experimental API.
"""
fname = Path(fname)
if not fname.is_absolute():
fname = Path(self._config.rootdir, fname)
if not fname.parent.exists():
fname.parent.mkdir(exist_ok=True, parents=True)
self.log_file_handler = logging.FileHandler(
str(fname), mode="w", encoding="UTF-8"
)
self.log_file_handler.setFormatter(self.log_file_formatter)
def _log_cli_enabled(self):
"""Return True if log_cli should be considered enabled, either explicitly
or because --log-cli-level was given in the command-line.
"""
return self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("collection")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@contextmanager
def _runtest_for(self, item, when):
with self._runtest_for_main(item, when):
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@contextmanager
def _runtest_for_main(self, item, when):
"""Implements the internals of pytest_runtest_xxx() hook."""
with catching_logs(
LogCaptureHandler(), formatter=self.formatter, level=self.log_level
) as log_handler:
if self.log_cli_handler:
self.log_cli_handler.set_when(when)
if item is None:
yield # run the test
return
if not hasattr(item, "catch_log_handlers"):
item.catch_log_handlers = {}
item.catch_log_handlers[when] = log_handler
item.catch_log_handler = log_handler
try:
yield # run test
finally:
if when == "teardown":
del item.catch_log_handler
del item.catch_log_handlers
if self.print_logs:
# Add a captured log section to the report.
log = log_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self._runtest_for(item, "setup"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self._runtest_for(item, "call"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self._runtest_for(item, "teardown"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logstart(self):
if self.log_cli_handler:
self.log_cli_handler.reset()
with self._runtest_for(None, "start"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logfinish(self):
with self._runtest_for(None, "finish"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logreport(self):
with self._runtest_for(None, "logreport"):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionfinish(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionfinish")
if self.log_file_handler is not None:
try:
with catching_logs(
self.log_file_handler, level=self.log_file_level
):
yield
finally:
# Close the FileHandler explicitly.
# (logging.shutdown might have lost the weakref?!)
self.log_file_handler.close()
else:
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionstart(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionstart")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session):
"""Runs all collected test items."""
if session.config.option.collectonly:
yield
return
if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
# setting verbose flag is needed to avoid messy test progress output
self._config.option.verbose = 1
with self.live_logs_context():
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield # run all the tests
else:
yield # run all the tests
class _LiveLoggingStreamHandler(logging.StreamHandler):
"""
Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
in each test.
    During live logging we must also explicitly disable stdout/stderr capturing, otherwise it will get captured
and won't appear in the terminal.
"""
def __init__(self, terminal_reporter, capture_manager):
"""
:param _pytest.terminal.TerminalReporter terminal_reporter:
:param _pytest.capture.CaptureManager capture_manager:
"""
logging.StreamHandler.__init__(self, stream=terminal_reporter)
self.capture_manager = capture_manager
self.reset()
self.set_when(None)
self._test_outcome_written = False
def reset(self):
"""Reset the handler; should be called before the start of each test"""
self._first_record_emitted = False
def set_when(self, when):
"""Prepares for the given test phase (setup/call/teardown)"""
self._when = when
self._section_name_shown = False
if when == "start":
self._test_outcome_written = False
def emit(self, record):
ctx_manager = (
self.capture_manager.global_and_fixture_disabled()
if self.capture_manager
else nullcontext()
)
with ctx_manager:
if not self._first_record_emitted:
self.stream.write("\n")
self._first_record_emitted = True
elif self._when in ("teardown", "finish"):
if not self._test_outcome_written:
self._test_outcome_written = True
self.stream.write("\n")
if not self._section_name_shown and self._when:
self.stream.section("live log " + self._when, sep="-", bold=True)
self._section_name_shown = True
logging.StreamHandler.emit(self, record)
|
|
from .. xmlstream.stanzabase import registerStanzaPlugin, ElementBase, ET, JID
from .. stanza.iq import Iq
from .. stanza.message import Message
from .. basexmpp import basexmpp
from .. xmlstream.xmlstream import XMLStream
import logging
from . import xep_0004
class PubsubState(ElementBase):
namespace = 'http://jabber.org/protocol/psstate'
name = 'state'
plugin_attrib = 'psstate'
interfaces = set(('node', 'item', 'payload'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setPayload(self, value):
self.xml.append(value)
def getPayload(self):
childs = self.xml.getchildren()
if len(childs) > 0:
return childs[0]
def delPayload(self):
for child in self.xml.getchildren():
self.xml.remove(child)
registerStanzaPlugin(Iq, PubsubState)
class PubsubStateEvent(ElementBase):
namespace = 'http://jabber.org/protocol/psstate#event'
name = 'event'
plugin_attrib = 'psstate_event'
    interfaces = set(tuple())
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Message, PubsubStateEvent)
registerStanzaPlugin(PubsubStateEvent, PubsubState)
class Pubsub(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'pubsub'
plugin_attrib = 'pubsub'
interfaces = set(tuple())
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Iq, Pubsub)
class PubsubOwner(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'pubsub'
plugin_attrib = 'pubsub_owner'
interfaces = set(tuple())
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Iq, PubsubOwner)
class Affiliation(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'affiliation'
plugin_attrib = name
interfaces = set(('node', 'affiliation'))
plugin_attrib_map = {}
plugin_tag_map = {}
class Affiliations(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'affiliations'
plugin_attrib = 'affiliations'
interfaces = set(tuple())
plugin_attrib_map = {}
plugin_tag_map = {}
subitem = (Affiliation,)
def append(self, affiliation):
if not isinstance(affiliation, Affiliation):
raise TypeError
self.xml.append(affiliation.xml)
return self.iterables.append(affiliation)
registerStanzaPlugin(Pubsub, Affiliations)
class Subscription(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'subscription'
plugin_attrib = name
interfaces = set(('jid', 'node', 'subscription', 'subid'))
plugin_attrib_map = {}
plugin_tag_map = {}
    def setJid(self, value):
        self._setAttr('jid', str(value))
    def getJid(self):
        return JID(self._getAttr('jid'))
registerStanzaPlugin(Pubsub, Subscription)
class Subscriptions(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'subscriptions'
plugin_attrib = 'subscriptions'
interfaces = set(tuple())
plugin_attrib_map = {}
plugin_tag_map = {}
subitem = (Subscription,)
registerStanzaPlugin(Pubsub, Subscriptions)
class OptionalSetting(object):
interfaces = set(('required',))
def setRequired(self, value):
value = bool(value)
if value and not self['required']:
self.xml.append(ET.Element("{%s}required" % self.namespace))
elif not value and self['required']:
self.delRequired()
def getRequired(self):
required = self.xml.find("{%s}required" % self.namespace)
if required is not None:
return True
else:
return False
def delRequired(self):
required = self.xml.find("{%s}required" % self.namespace)
if required is not None:
self.xml.remove(required)
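# Effect of the mixin (sketch with a hypothetical element): assigning
# element['required'] = True appends an empty <required/> child in the
# element's namespace; assigning False (or calling delRequired) removes it,
# and getRequired reports whether the child is present.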
class SubscribeOptions(ElementBase, OptionalSetting):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'subscribe-options'
plugin_attrib = 'suboptions'
plugin_attrib_map = {}
plugin_tag_map = {}
interfaces = set(('required',))
registerStanzaPlugin(Subscription, SubscribeOptions)
class Item(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'item'
plugin_attrib = name
interfaces = set(('id', 'payload'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setPayload(self, value):
self.xml.append(value)
def getPayload(self):
childs = self.xml.getchildren()
if len(childs) > 0:
return childs[0]
def delPayload(self):
for child in self.xml.getchildren():
self.xml.remove(child)
class Items(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'items'
plugin_attrib = 'items'
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
subitem = (Item,)
registerStanzaPlugin(Pubsub, Items)
class Create(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'create'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Pubsub, Create)
#class Default(ElementBase):
# namespace = 'http://jabber.org/protocol/pubsub'
# name = 'default'
# plugin_attrib = name
# interfaces = set(('node', 'type'))
# plugin_attrib_map = {}
# plugin_tag_map = {}
#
# def getType(self):
# t = self._getAttr('type')
# if not t: t == 'leaf'
# return t
#
#registerStanzaPlugin(Pubsub, Default)
class Publish(Items):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'publish'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
subitem = (Item,)
registerStanzaPlugin(Pubsub, Publish)
class Retract(Items):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'retract'
plugin_attrib = name
interfaces = set(('node', 'notify'))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Pubsub, Retract)
class Unsubscribe(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'unsubscribe'
plugin_attrib = name
interfaces = set(('node', 'jid'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
return JID(self._getAttr('jid'))
registerStanzaPlugin(Pubsub, Unsubscribe)
class Subscribe(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'subscribe'
plugin_attrib = name
interfaces = set(('node', 'jid'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
return JID(self._getAttr('jid'))
registerStanzaPlugin(Pubsub, Subscribe)
class Configure(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'configure'
plugin_attrib = name
interfaces = set(('node', 'type'))
plugin_attrib_map = {}
plugin_tag_map = {}
def getType(self):
t = self._getAttr('type')
        if not t: t = 'leaf'
return t
registerStanzaPlugin(Pubsub, Configure)
registerStanzaPlugin(Configure, xep_0004.Form)
class DefaultConfig(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'default'
plugin_attrib = 'default'
interfaces = set(('node', 'type', 'config'))
plugin_attrib_map = {}
plugin_tag_map = {}
def __init__(self, *args, **kwargs):
ElementBase.__init__(self, *args, **kwargs)
def getType(self):
t = self._getAttr('type')
if not t: t = 'leaf'
return t
def getConfig(self):
return self['form']
def setConfig(self, value):
self['form'].setStanzaValues(value.getStanzaValues())
return self
registerStanzaPlugin(PubsubOwner, DefaultConfig)
registerStanzaPlugin(DefaultConfig, xep_0004.Form)
class Options(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub'
name = 'options'
plugin_attrib = 'options'
interfaces = set(('jid', 'node', 'options'))
plugin_attrib_map = {}
plugin_tag_map = {}
def __init__(self, *args, **kwargs):
ElementBase.__init__(self, *args, **kwargs)
def getOptions(self):
config = self.xml.find('{jabber:x:data}x')
form = xep_0004.Form()
if config is not None:
form.fromXML(config)
return form
def setOptions(self, value):
self.xml.append(value.getXML())
return self
def delOptions(self):
config = self.xml.find('{jabber:x:data}x')
self.xml.remove(config)
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
return JID(self._getAttr('jid'))
registerStanzaPlugin(Pubsub, Options)
registerStanzaPlugin(Subscribe, Options)
class OwnerAffiliations(Affiliations):
namespace = 'http://jabber.org/protocol/pubsub#owner'
    interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
def append(self, affiliation):
if not isinstance(affiliation, OwnerAffiliation):
raise TypeError
self.xml.append(affiliation.xml)
        return self.iterables.append(affiliation)
registerStanzaPlugin(PubsubOwner, OwnerAffiliations)
class OwnerAffiliation(Affiliation):
namespace = 'http://jabber.org/protocol/pubsub#owner'
interfaces = set(('affiliation', 'jid'))
plugin_attrib_map = {}
plugin_tag_map = {}
class OwnerConfigure(Configure):
namespace = 'http://jabber.org/protocol/pubsub#owner'
interfaces = set(('node', 'config'))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(PubsubOwner, OwnerConfigure)
class OwnerDefault(OwnerConfigure):
namespace = 'http://jabber.org/protocol/pubsub#owner'
interfaces = set(('node', 'config'))
plugin_attrib_map = {}
plugin_tag_map = {}
def getConfig(self):
return self['form']
def setConfig(self, value):
self['form'].setStanzaValues(value.getStanzaValues())
return self
registerStanzaPlugin(PubsubOwner, OwnerDefault)
registerStanzaPlugin(OwnerDefault, xep_0004.Form)
class OwnerDelete(ElementBase, OptionalSetting):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'delete'
plugin_attrib = 'delete'
plugin_attrib_map = {}
plugin_tag_map = {}
interfaces = set(('node',))
registerStanzaPlugin(PubsubOwner, OwnerDelete)
class OwnerPurge(ElementBase, OptionalSetting):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'purge'
plugin_attrib = name
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(PubsubOwner, OwnerPurge)
class OwnerRedirect(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'redirect'
plugin_attrib = name
interfaces = set(('node', 'jid'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
return JID(self._getAttr('jid'))
registerStanzaPlugin(OwnerDelete, OwnerRedirect)
class OwnerSubscriptions(Subscriptions):
namespace = 'http://jabber.org/protocol/pubsub#owner'
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
def append(self, subscription):
if not isinstance(subscription, OwnerSubscription):
raise TypeError
self.xml.append(subscription.xml)
        return self.iterables.append(subscription)
registerStanzaPlugin(PubsubOwner, OwnerSubscriptions)
class OwnerSubscription(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#owner'
name = 'subscription'
plugin_attrib = name
interfaces = set(('jid', 'subscription'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
        return JID(self._getAttr('jid'))
class Event(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'event'
plugin_attrib = 'pubsub_event'
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Message, Event)
class EventItem(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'item'
plugin_attrib = 'item'
interfaces = set(('id', 'payload'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setPayload(self, value):
self.xml.append(value)
def getPayload(self):
childs = self.xml.getchildren()
if len(childs) > 0:
return childs[0]
def delPayload(self):
for child in self.xml.getchildren():
self.xml.remove(child)
class EventRetract(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'retract'
plugin_attrib = 'retract'
interfaces = set(('id',))
plugin_attrib_map = {}
plugin_tag_map = {}
class EventItems(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'items'
plugin_attrib = 'items'
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
subitem = (EventItem, EventRetract)
registerStanzaPlugin(Event, EventItems)
class EventCollection(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'collection'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Event, EventCollection)
class EventAssociate(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'associate'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(EventCollection, EventAssociate)
class EventDisassociate(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'disassociate'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(EventCollection, EventDisassociate)
class EventConfiguration(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'configuration'
plugin_attrib = name
interfaces = set(('node', 'config'))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Event, EventConfiguration)
registerStanzaPlugin(EventConfiguration, xep_0004.Form)
class EventPurge(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'purge'
plugin_attrib = name
interfaces = set(('node',))
plugin_attrib_map = {}
plugin_tag_map = {}
registerStanzaPlugin(Event, EventPurge)
class EventSubscription(ElementBase):
namespace = 'http://jabber.org/protocol/pubsub#event'
name = 'subscription'
plugin_attrib = name
interfaces = set(('node','expiry', 'jid', 'subid', 'subscription'))
plugin_attrib_map = {}
plugin_tag_map = {}
def setJid(self, value):
self._setAttr('jid', str(value))
def getJid(self):
return JID(self._getAttr('jid'))
registerStanzaPlugin(Event, EventSubscription)
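# Usage sketch (hypothetical; this module is normally driven by a pubsub
# plugin). After the registerStanzaPlugin() calls above, the stanza plugins
# chain off an Iq, e.g.:
#
#   iq = Iq()
#   iq['pubsub']['publish']['node'] = 'princely_musings'  # <pubsub><publish node=.../>
#   iq['psstate']['node'] = 'mynode'                      # <state node=.../> via PubsubState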
|
|
from ..utils import Scraper
from bs4 import BeautifulSoup
from collections import OrderedDict
from operator import itemgetter
from pprint import pprint
from queue import Queue
from threading import Thread, Lock
from time import time
import os
import re
import sys
class Textbooks:
"""A scraper for UofT's book store.
UofT Book Store is located at http://uoftbookstore.com/.
"""
host = 'http://uoftbookstore.com'
threads = 32
@staticmethod
def scrape(location='.'):
"""Update the local JSON files for this scraper."""
Scraper.logger.info('Textbooks initialized.')
terms = Textbooks.retrieve_terms()
departments = Textbooks.retrieve_departments(terms)
# Get course info
ts = time()
queue = Queue()
for x in range(Textbooks.threads):
worker = CoursesWorker(queue)
worker.daemon = True
worker.start()
total = len(departments)
Scraper.logger.info('Queued %d departments. (1/3)' % total)
for department in departments:
queue.put((department, total))
queue.join()
        Scraper.logger.info('Took %.2fs to retrieve course info.' % (
time() - ts
))
# Get section info
ts = time()
queue = Queue()
for x in range(Textbooks.threads):
worker = SectionsWorker(queue)
worker.daemon = True
worker.start()
total = len(CoursesWorker.all_courses)
Scraper.logger.info('Queued %d courses. (2/3)' % total)
for course in CoursesWorker.all_courses:
queue.put((course, total))
queue.join()
        Scraper.logger.info('Took %.2fs to retrieve section info.' % (
time() - ts
))
# Get book info
ts = time()
queue = Queue()
for x in range(Textbooks.threads):
worker = BooksWorker(queue)
worker.daemon = True
worker.start()
total = len(SectionsWorker.all_sections)
Scraper.logger.info('Queued %d sections. (3/3)' % total)
for section in SectionsWorker.all_sections:
queue.put((section, total))
queue.join()
        Scraper.logger.info('Took %.2fs to retrieve book info.' % (
time() - ts
))
books = list(BooksWorker.all_books.values())
# Sort books and dump the files
for book in books:
book['courses'] = sorted(book['courses'], key=itemgetter('id'))
for i in range(len(book['courses'])):
book['courses'][i]['meeting_sections'] = \
sorted(book['courses'][i]['meeting_sections'],
key=itemgetter('code'))
Scraper.save_json(book, location, book['id'])
Scraper.logger.info('Textbooks completed.')
@staticmethod
def retrieve_terms():
html = Scraper.get('%s/buy_courselisting.asp' % Textbooks.host)
if html is None:
return []
listing = BeautifulSoup(html, "html.parser")
terms = listing.find(id='fTerm').find_all('option')[1:]
accepted_terms = []
for term in terms:
val = term.get_text()
if val.startswith('ST GEORGE') or val.startswith('MISSISSAUGA') \
or val.startswith('SCARBOROUGH'):
accepted_terms.append(term)
return accepted_terms
@staticmethod
def retrieve_departments(terms):
all_departments = []
for term in terms:
term_name = term.get_text()
            m = re.search(r'(\d{5})', term_name)
session = m.group(0)
campus, term_id = term.get('value').split('|')
payload = {
'control': 'campus',
'campus': campus,
'term': term_id,
't': int(round(time() * 1000))
}
headers = {
'Referer': '%s/buy_courselisting.asp' % Textbooks.host
}
xml = Scraper.get('%s/textbooks_xml.asp' % Textbooks.host,
params=payload, headers=headers, max_attempts=3)
if xml is None:
continue
departments = BeautifulSoup(xml, "xml").find_all('department')
for department in departments:
all_departments.append({
'dept_id': department.get('id'),
'dept_name': department.get('name').title(),
'term_id': term_id,
'session': session
})
Scraper.logger.info(
                'Retrieved department info from %s.' % term_name)
return all_departments
@staticmethod
def retrieve_courses(department):
all_courses = []
payload = {
'control': 'department',
'dept': department['dept_id'],
'term': department['term_id'],
't': int(round(time() * 1000))
}
headers = {
'Referer': '%s/buy_courselisting.asp' % Textbooks.host
}
xml = Scraper.get('%s/textbooks_xml.asp' % Textbooks.host,
params=payload, headers=headers, max_attempts=3)
if xml is None:
return []
courses = BeautifulSoup(xml, "xml").find_all('course')
for course in courses:
all_courses.append({
'course_id': course.get('id'),
'course_name': course.get('name'),
'term_id': department['term_id'],
'session': department['session']
})
return all_courses
@staticmethod
def retrieve_sections(course):
all_sections = []
payload = {
'control': 'course',
'course': course['course_id'],
'term': course['term_id'],
't': int(round(time() * 1000))
}
headers = {
'Referer': '%s/buy_courselisting.asp' % Textbooks.host
}
xml = Scraper.get('%s/textbooks_xml.asp' % Textbooks.host,
params=payload, headers=headers, max_attempts=3)
if xml is None:
return []
sections = BeautifulSoup(xml, "xml").find_all('section')
for section in sections:
all_sections.append({
'section_id': section.get('id'),
'section_code': section.get('name'),
'section_instructor': section.get('instructor'),
'course_code': course['course_name'],
'session': course['session']
})
return all_sections
@staticmethod
def retrieve_books(section):
all_books = []
payload = {
'control': 'section',
'section': section['section_id'],
't': int(round(time() * 1000))
}
headers = {
'Referer': '%s/buy_courselisting.asp' % Textbooks.host
}
html = Scraper.get('%s/textbooks_xml.asp' % Textbooks.host,
params=payload, headers=headers, max_attempts=3)
if html is None:
return []
soup = BeautifulSoup(html, "html.parser")
books = soup.find_all('tr', {'class': 'book'})
        if books is None:
return []
for book in books:
if len(book.get_text().strip()) == 0:
continue
image = book.find(class_='book-cover').img.get('src')
image = 'http://uoftbookstore.com/%s' % image
image = image.replace('Size=M', 'Size=L')
# This doesn't mean "avoid textbooks with no image"
# This is a case when the textbook is called "No required text"
if 'not_available_' in image:
continue
book_id = book.find(class_='product-field-pf_id').get('value')
url = '%s/buy_book_detail.asp?pf_id=%s' % (Textbooks.host, book_id)
title = Scraper.get_text_from_class(book, 'book-title')
edition = Scraper.get_text_from_class(book, 'book-edition')
if len(edition) > 0:
edition = ''.join(list(filter(str.isdigit, edition)))
try:
edition = int(edition)
except ValueError:
edition = 1
            if edition == '' or edition == 0:
edition = 1
author = Scraper.get_text_from_class(book, 'book-author')
            m = re.search(r'([\d]+[E]?)', author)
            if m is not None:
junk = m.group(0)
author = author.replace(junk, '').strip()
isbn = Scraper.get_text_from_class(book, 'isbn')
requirement = Scraper.get_text_from_class(book, 'book-req')
requirement = requirement.lower()
price = Scraper.get_text_from_class(book, 'book-price-list')
try:
price = float(price[1:])
except ValueError:
price = 0
instructor = section['section_instructor'].split(',')
if len(instructor) == 2:
instructor = '%s %s' % (
instructor[0][:1],
instructor[1].strip()
)
instructor = instructor.strip()
else:
instructor = ''
instructors = [instructor]
if len(instructor) == 0:
instructors = []
meeting_sections = [OrderedDict([
("code", section['section_code']),
("instructors", instructors)
])]
course_id = '%s%s' % (section['course_code'], section['session'])
courses = [OrderedDict([
("id", course_id),
("code", section['course_code']),
("requirement", requirement),
("meeting_sections", meeting_sections)
])]
textbook = OrderedDict([
("id", book_id),
("isbn", isbn),
("title", title),
("edition", edition),
("author", author),
("image", image),
("price", price),
("url", url),
("courses", courses)
])
all_books.append(textbook)
return all_books
class CoursesWorker(Thread):
all_courses = []
done = 0
lock = Lock()
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
department, total = self.queue.get()
courses = Textbooks.retrieve_courses(department)
CoursesWorker.lock.acquire()
CoursesWorker.all_courses += courses
CoursesWorker.done += 1
Scraper.flush_percentage(CoursesWorker.done / total)
CoursesWorker.lock.release()
self.queue.task_done()
class SectionsWorker(Thread):
all_sections = []
done = 0
lock = Lock()
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
course, total = self.queue.get()
sections = Textbooks.retrieve_sections(course)
SectionsWorker.lock.acquire()
SectionsWorker.all_sections += sections
SectionsWorker.done += 1
Scraper.flush_percentage(SectionsWorker.done / total)
SectionsWorker.lock.release()
self.queue.task_done()
class BooksWorker(Thread):
all_books = {}
done = 0
lock = Lock()
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
section, total = self.queue.get()
books = Textbooks.retrieve_books(section)
BooksWorker.lock.acquire()
for book in books:
if book['id'] in BooksWorker.all_books:
index = -1
for i in range(len(BooksWorker.all_books[book['id']]['courses'])):
if BooksWorker.all_books[book['id']]['courses'][i]['id'] == book['courses'][0]['id']:
index = i
break
if index >= 0:
BooksWorker.all_books[book['id']]['courses'][index]['meeting_sections'] += book['courses'][0]['meeting_sections']
else:
BooksWorker.all_books[book['id']]['courses'] += book['courses']
else:
BooksWorker.all_books[book['id']] = book
BooksWorker.done += 1
Scraper.flush_percentage(BooksWorker.done / total)
BooksWorker.lock.release()
self.queue.task_done()
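# Usage sketch (hypothetical entry point): run the threaded pipeline
# (terms -> departments -> courses -> sections -> books) and dump one JSON
# file per textbook into the given directory:
#
#   Textbooks.scrape(location='output/books')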
|
|
import os
import re
from time import sleep
from opensextant import Place
from opensextant.gazetteer import DB, estimate_name_bias, GazetteerIndex, get_default_db
from opensextant.utility import replace_diacritics, load_list
def filter_out_feature(pl: Place, feats):
"""
Filter out places by their feature type or by their name traits.
    Long names (more than 20 characters or more than 2 words) are relatively unique and are not filtered.
    Otherwise, places are filtered out if their feature class+code is designated as not useful
    for a particular task. E.g., we never want to tag short river or stream names (H/STM).
:param pl: Place object
:param feats: Pattern
:return:
"""
if not pl.feature_code:
return False
# Ignore trivial names:
plen = len(pl.name)
if plen < 2:
return True
fc = f"{pl.feature_class}/{pl.feature_code}"
for feat_filter in feats:
if feat_filter.match(fc):
return True
return False
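# Illustration (invented values): with feats = [re.compile(r"H/STM.*")],
# a Place whose feature_class/feature_code is "H"/"STM" produces
# fc = "H/STM", matches the pattern, and is filtered out (returns True);
# a "P"/"PPL" place returns False and is kept.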
def filter_in_feature(pl: Place, feats):
"""
:param pl: Place
:param feats: feature filters (regex)
:return:
"""
if not feats:
return True
fc = f"{pl.feature_class}/{pl.feature_code}"
for feat_filter in feats:
if feat_filter.match(fc):
return True
return False
def oddball_omissions(pl: Place):
if pl.feature_code == "RGNE":
if " " in pl.name:
toks = pl.name.split(" ")
last_token = toks[-1]
return last_token.isupper() and len(last_token) <= 3
# Awaiting other omission clauses here.
return False
class Finalizer:
def __init__(self, dbf, debug=False):
self.db = DB(dbf)
self.debug = debug
self.inter_country_delay = 2
def adjust_place_id(self):
self.db.create_indices()
decisions = {}
for pl in self.db.list_places(criteria=" where place_id = 'N-1'"):
if not pl.geohash:
continue
# NE outputs place id of NULL or "-1". This should rectify that by pulling in a decent place ID
# Geohash is a horrible way to filter data -- certain states just sit on the boundary of certain boxes.
for ghlen in [3, 2, 1, 0]:
gh = pl.geohash[0:ghlen]
key = f"{pl.country_code}#{pl.adm1}#{pl.feature_class}#{pl.feature_code}#{gh}"
distinct_ids = set([])
if key in decisions:
distinct_ids.add(decisions[key])
else:
                    # Avoid too many SQL queries -- record decisions as they are made
criteria = f""" and feat_code='{pl.feature_code}'
and adm1 = '{pl.adm1}'
and place_id != 'N-1'
and geohash like '{gh}%'
"""
for model_pl in self.db.list_places(cc=pl.country_code, fc=pl.feature_class, criteria=criteria):
distinct_ids.add(model_pl.place_id)
#
print("IDS for:", pl.name, distinct_ids)
if len(distinct_ids) == 1:
plid = distinct_ids.pop()
decisions[key] = plid
self.db.update_place_id(pl.id, plid)
break
elif len(distinct_ids) > 1:
print("Ambiguous place ID resolution - no adjustment: ", pl.name, pl.country_code)
self.db.commit()
def adjust_bias(self):
self.db.create_indices()
print("Adjust Biasing")
# Fix significant place names: When loading gazetteer data for the first time,
# you do not have a global awareness of names/geography -- so things like biasing "common words" in wordstats
        # lead us to mark valid common city names as "too common", and they are filtered out.
        # Example: "Beijing" (P/PPLX) is seen first and marked as a common word,
        # and then "Beijing" (P/PPLC) is seen and exempted. You now have two conflicting conclusions on the name "Beijing",
        # with the result that only the capital Beijing will ever be used in tagging.
# FIX: loop through all names fc = "A" and P/PPLC, major cities population 200,000 or greater.
# re-mark the name_bias to positive if determined to be common words previously.
names_done = set([])
count_adjusted = 0
# admin_names = self.db.list_admin_names()
flag_fix_major_place_names = True
flag_fix_admin_codes = False # Addressed in-line through PlaceHueristics
if flag_fix_major_place_names:
# ============================================
# Find Names < 30 chars in general name_group where the names represent significant features.
# Recode those feature/names so ANY row by that name is not excluded by the "too common" judgement
sql_clause = """ where
source in ('U', 'N', 'G')
and name_type='N'
and name_group=''
and LENGTH(name) < 30
and feat_class in ('A', 'P')
and feat_code in ('ADM1', 'PPLC', 'PCL', 'PCLI')
and name NOT like '% %' order by name
"""
for pl in self.db.list_places(criteria=sql_clause):
names = {pl.name, replace_diacritics(pl.name)}
for name in names:
if name.lower() in names_done:
continue
if len(name) < 4:
continue
print(f"ADJUST: {name}")
name_bias = estimate_name_bias(name)
# ADJUSTED here is an approximation.
count_adjusted += 1
# For each name, remark each name and it variants.
self.db.update_bias_by_name(name_bias, name)
names_done.add(name.lower())
self.db.commit()
if flag_fix_admin_codes:
# ============================================
flip_ids = []
non_place_codes = os.path.join('etc', 'gazetteer', 'filters', 'non-placenames,admin-codes.csv')
IGNORE_CODES = set(load_list(non_place_codes))
for pl in self.db.list_places(fc="A",
criteria=" and name_type='C' and feat_code in ('ADM1','ADM2') and search_only=1"):
if pl.name in IGNORE_CODES:
continue
flip_ids.append(pl.id)
print(pl)
# Any rows found -- flip their search_only status.
self.db.update_bias(10, flip_ids)
self.db.commit()
def deduplicate(self):
"""
        Finalize the gazetteer database to include any cleanup, deduplication, etc.
:return:
"""
#
        # Finalization reviews places by country to identify distinct features to promote as primary
        # entries; any "duplicates" are marked as such (duplicate=1). Primary entries are those such as:
#
# - Unique entries
# - Having attributes such as non-zero id or name bias
# - NGA & USGS gazetteers by default -- other gazetteers will be overlaid where name variants are offered
# for same feature / point ID / location
#
# Easiest way to break down gazetteer is:
# - by Country
# - by feature class or empty non-feature. ... Resolve any low-quality entries with empty feature.
self.db.create_indices()
cc_list = self.db.list_countries()
BASE_SOURCES = {"OA", "OG", "U", "UF", "N", "NF", "ISO"}
for cc in cc_list:
# Collect entries with
print(f"Country '{cc}'")
# base sources via OpenSextant gazetteer: OA, OG, U, UF, N, NF
base_sources = ",".join([f'"{src}"' for src in BASE_SOURCES])
keys = set([])
duplicates = []
# Collect all duplicate names within USGS and NGA base layers
sql = f"""select id, feat_class, source, geohash, adm1, name, place_id
from placenames where cc='{cc}' and source in ({base_sources}) and duplicate=0"""
self._collect_duplicates(sql, keys, duplicates, label="Base")
# De-duplicate other sources that leverage USGS/NGA as base sources.
sql = f"""select id, feat_class, source, geohash, adm1, name, place_id
from placenames where cc='{cc}' and source not in ({base_sources}) and duplicate=0"""
self._collect_duplicates(sql, keys, duplicates, label="Other Sources")
self.db.mark_duplicates(duplicates)
print("Complete De-duplicating")
def _collect_duplicates(self, sql, keys, dups, label="NA"):
"""
        Collect duplicate row IDs. Each row from the specialized SQL is a dictionary of
        "id, feat_class, source, geohash, adm1, name, place_id".
:param sql:
:param keys:
:param dups:
:param label:
:return:
"""
for row in self.db.conn.execute(sql):
fc = row["feat_class"]
loc = row["geohash"]
nm = row["name"].lower()
a1 = row["adm1"]
k = f"{fc}/{loc[0:5]}/{a1}/{nm}"
if k in keys:
if self.debug: print(f"{label} dup: ", row["id"])
dups.append(row["id"])
else:
# Unique entry
keys.add(k)
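    # Example of the deduplication key (invented values): a row with
    # feat_class="P", geohash="9q8yyk", adm1="CA", name="San Francisco"
    # produces the key "P/9q8yy/CA/san francisco", so rows agreeing on
    # feature class, 5-char geohash cell, ADM1 and case-folded name are
    # treated as duplicates.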
def index(self, url, features=None, ignore_features=None, ignore_func=None,
ignore_digits=True, ignore_names=False, limit=-1):
print("Xponents Gazetteer Finalizer: INDEX")
indexer = GazetteerIndex(url)
# indexer.commit_rate = 100000
indexer.commit_rate = -1
#
filters = []
if ignore_features:
for f in ignore_features:
filters.append(re.compile(f))
inclusion_filters = []
if features:
for f in features:
inclusion_filters.append(re.compile(f))
default_criteria = " and duplicate=0"
if ignore_names:
default_criteria = " and duplicate=0 and name_type!='N'"
# For each row in DB, index to Solr. Maybe organize batches by row ID where dup=0.
cc_list = self.db.list_countries()
for cc in cc_list:
print(f"Country '{cc}'")
for pl in self.db.list_places(cc=cc, criteria=default_criteria, limit=limit):
if ignore_func:
if ignore_func(pl):
continue
if filter_out_feature(pl, filters):
continue
if ignore_digits and pl.name.isdigit():
continue
if not filter_in_feature(pl, inclusion_filters):
continue
indexer.add(pl)
sleep(self.inter_country_delay)
# Done with country
indexer.save(done=True)
print(f"Indexed {indexer.count}")
indexer.save(done=True)
def index_codes(self, url):
print("Xponents Gazetteer Finalizer: INDEX CODES, ABBREV")
indexer = GazetteerIndex(url)
indexer.commit_rate = -1
default_criteria = " where duplicate=0 and name_type!='N'"
for pl in self.db.list_places(criteria=default_criteria):
indexer.add(pl)
print(f"Indexed {indexer.count}")
indexer.save(done=True)
class PostalIndexer(Finalizer):
def __init__(self, dbf, **kwargs):
Finalizer.__init__(self, dbf, **kwargs)
self.inter_country_delay = 1
def finalize(self, limit=-1):
# No optimization on postal codes.
pass
def index(self, url, ignore_digits=False, **kwargs):
# Finalizer indexes postal data as-is. No digit or stop filters.
Finalizer.index(self, url, ignore_digits=False, **kwargs)
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("operation", help="adjust-id, dedup, adjust-bias, index -- pick one. Run all three in that order")
ap.add_argument("--db", default=get_default_db())
ap.add_argument("--max", help="maximum rows to process for testing", default=-1)
ap.add_argument("--debug", action="store_true", default=False)
ap.add_argument("--solr", help="Solr URL")
ap.add_argument("--optimize", action="store_true", default=False)
ap.add_argument("--postal", action="store_true", default=False)
args = ap.parse_args()
gaz = None
if args.operation == "index" and args.solr:
        # Feature types filtered out of the general index include: WELLS, STREAMS, SPRINGS, HILLS.
#
if args.postal:
# Postal Codes
gaz = PostalIndexer(args.db, debug=args.debug)
gaz.stop_filters = None
gaz.index(args.solr, ignore_digits=False, limit=int(args.max))
else:
gaz = Finalizer(args.db, debug=args.debug)
gaz.index(args.solr, ignore_digits=True, limit=int(args.max),
ignore_func=oddball_omissions,
ignore_features={"H/WLL.*",
"H/STM[ABCDHIQSBX]+",
"H/SPNG.*",
"T/HLL.*"})
elif args.operation == "adjust-id":
gaz = Finalizer(args.db, debug=args.debug)
gaz.adjust_place_id()
elif args.operation == "adjust-bias":
gaz = Finalizer(args.db, debug=args.debug)
gaz.adjust_bias()
elif args.operation == "dedup":
gaz = Finalizer(args.db, debug=args.debug)
gaz.deduplicate()
# Finish up.
if gaz:
if args.optimize:
gaz.db.optimize()
gaz.db.close()
|
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' submit.py '''
import glob
import logging
import os
import shutil
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.proto import topology_pb2
import heron.tools.cli.src.python.args as cli_args
import heron.tools.cli.src.python.execute as execute
import heron.tools.cli.src.python.jars as jars
import heron.tools.cli.src.python.opts as opts
import heron.tools.common.src.python.utils.config as config
import heron.tools.common.src.python.utils.classpath as classpath
# pylint: disable=too-many-return-statements
################################################################################
def create_parser(subparsers):
'''
Create a subparser for the submit command
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'submit',
help='Submit a topology',
usage="%(prog)s [options] cluster/[role]/[env] " + \
"topology-file-name topology-class-name [topology-args]",
add_help=False
)
cli_args.add_titles(parser)
cli_args.add_cluster_role_env(parser)
cli_args.add_topology_file(parser)
cli_args.add_topology_class(parser)
cli_args.add_config(parser)
cli_args.add_deactive_deploy(parser)
cli_args.add_extra_launch_classpath(parser)
cli_args.add_system_property(parser)
cli_args.add_verbose(parser)
parser.set_defaults(subcommand='submit')
return parser
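# Example invocation handled by this subparser (cluster/role/env and the
# topology arguments below are placeholders):
#
#   heron submit local/ads/devel ~/demo/target/topology.jar \
#       com.example.WordCountTopology word-count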
################################################################################
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file):
'''
Launch a topology given topology jar, its definition file and configurations
:param cl_args:
:param tmp_dir:
:param topology_file:
:param topology_defn_file:
:return:
'''
# get the normalized path for topology.tar.gz
topology_pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))
# get the release yaml file
release_yaml_file = config.get_heron_release_file()
# create a tar package with the cluster configuration and generated config files
config_path = cl_args['config_path']
tar_pkg_files = [topology_file, topology_defn_file]
generated_config_files = [release_yaml_file, cl_args['override_config_file']]
config.create_tar(topology_pkg_path, tar_pkg_files, config_path, generated_config_files)
# pass the args to submitter main
args = [
"--cluster", cl_args['cluster'],
"--role", cl_args['role'],
"--environment", cl_args['environ'],
"--heron_home", config.get_heron_dir(),
"--config_path", config_path,
"--override_config_file", cl_args['override_config_file'],
"--release_file", release_yaml_file,
"--topology_package", topology_pkg_path,
"--topology_defn", topology_defn_file,
"--topology_bin", topology_file # pex file if pex specified
]
if Log.getEffectiveLevel() == logging.DEBUG:
args.append("--verbose")
lib_jars = config.get_heron_libs(
jars.scheduler_jars() + jars.uploader_jars() + jars.statemgr_jars() + jars.packing_jars()
)
extra_jars = cl_args['extra_launch_classpath'].split(':')
# invoke the submitter to submit and launch the topology
execute.heron_class(
class_name='com.twitter.heron.scheduler.SubmitterMain',
lib_jars=lib_jars,
extra_jars=extra_jars,
args=args,
java_defines=[]
)
################################################################################
def launch_topologies(cl_args, topology_file, tmp_dir):
'''
Launch topologies
:param cl_args:
:param topology_file:
:param tmp_dir:
:return:
'''
# the submitter would have written the .defn file to the tmp_dir
defn_files = glob.glob(tmp_dir + '/*.defn')
if len(defn_files) == 0:
raise Exception("No topologies found")
try:
for defn_file in defn_files:
# load the topology definition from the file
topology_defn = topology_pb2.Topology()
      try:
        with open(defn_file, "rb") as handle:
          topology_defn.ParseFromString(handle.read())
      except Exception:
        raise Exception("Could not open and parse topology defn file %s" % defn_file)
# launch the topology
try:
Log.info("Launching topology \'%s\'" % topology_defn.name)
launch_a_topology(cl_args, tmp_dir, topology_file, defn_file)
Log.info("Topology \'%s\' launched successfully" % topology_defn.name)
except Exception as ex:
Log.exception('Failed to launch topology \'%s\' because %s' % (topology_defn.name, str(ex)))
raise
  except Exception:
    # failures are already logged above; re-raise to the caller
    raise
################################################################################
def submit_fatjar(cl_args, unknown_args, tmp_dir):
'''
  We use the packer to make a package for the jar and dump it
  to a well-known location. We then run the main method of the topology class
  with the specified arguments, passing extra arguments through the
  HERON_OPTIONS environment variable. This runs the jar file with the
  topology_class_name. The submitter inside will write out the topology defn
  file to a well-known location that we specify. We then write to the
  appropriate places in zookeeper and launch the scheduler jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
'''
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
try:
execute.heron_class(
class_name=cl_args['topology-class-name'],
lib_jars=config.get_heron_libs(jars.topology_jars()),
extra_jars=[topology_file],
args=tuple(unknown_args),
java_defines=cl_args['topology_main_jvm_property'])
  except Exception:
    Log.debug(traceback.format_exc())
Log.error("Unable to execute topology main class")
return False
try:
launch_topologies(cl_args, topology_file, tmp_dir)
  except Exception:
return False
finally:
shutil.rmtree(tmp_dir)
return True
################################################################################
def submit_tar(cl_args, unknown_args, tmp_dir):
'''
  Extract and execute the java files inside the tar and then add the topology
  definition file created by running submitTopology.
  We use the packer to make a package for the tar and dump it
  to a well-known location. We then run the main method of the topology class
  with the specified arguments, passing extra arguments through the
  HERON_OPTIONS environment variable. This runs the jar file with the
  topology class name. The submitter inside will write out the topology defn
  file to a well-known packer location that we specify. We then write to the
  appropriate places in zookeeper and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
'''
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
java_defines = cl_args['topology_main_jvm_property']
execute.heron_tar(
cl_args['topology-class-name'],
topology_file,
tuple(unknown_args),
tmp_dir,
java_defines)
try:
launch_topologies(cl_args, topology_file, tmp_dir)
except Exception:
return False
finally:
shutil.rmtree(tmp_dir)
return True
################################################################################
# Execute the pex file to create topology definition file by running
# the topology's main class.
################################################################################
# pylint: disable=unused-argument
def submit_pex(cl_args, unknown_args, tmp_dir):
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
topology_class_name = cl_args['topology-class-name']
try:
execute.heron_pex(topology_file, topology_class_name, tuple(unknown_args))
except Exception as ex:
Log.error("Error when loading a topology: %s" % str(ex))
return False
try:
launch_topologies(cl_args, topology_file, tmp_dir)
  except Exception:
return False
finally:
shutil.rmtree(tmp_dir)
return True
################################################################################
# pylint: disable=unused-argument
def run(command, parser, cl_args, unknown_args):
'''
Submits the topology to the scheduler
* Depending on the topology file name extension, we treat the file as a
fatjar (if the ext is .jar) or a tar file (if the ext is .tar/.tar.gz).
* We upload the topology file to the packer, update zookeeper and launch
scheduler jobs representing that topology
* You can see your topology in Heron UI
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
'''
# get the topology file name
topology_file = cl_args['topology-file-name']
# check to see if the topology file exists
if not os.path.isfile(topology_file):
Log.error("Topology jar|tar|pex file %s does not exist" % topology_file)
return False
# check if it is a valid file type
jar_type = topology_file.endswith(".jar")
tar_type = topology_file.endswith(".tar") or topology_file.endswith(".tar.gz")
pex_type = topology_file.endswith(".pex")
if not jar_type and not tar_type and not pex_type:
Log.error("Unknown file type. Please use .tar or .tar.gz or .jar or .pex file")
return False
# check if extra launch classpath is provided and if it is validate
if cl_args['extra_launch_classpath']:
valid_classpath = classpath.valid_java_classpath(cl_args['extra_launch_classpath'])
if not valid_classpath:
Log.error("One of jar or directory in extra launch classpath does not exist")
return False
# create a temporary directory for topology definition file
tmp_dir = tempfile.mkdtemp()
  # if the topology needs to be launched in a deactivated state, do so
if cl_args['deploy_deactivated']:
initial_state = topology_pb2.TopologyState.Name(topology_pb2.PAUSED)
else:
initial_state = topology_pb2.TopologyState.Name(topology_pb2.RUNNING)
# set the tmp dir and deactivated state in global options
opts.set_config('cmdline.topologydefn.tmpdirectory', tmp_dir)
opts.set_config('cmdline.topology.initial.state', initial_state)
# check the extension of the file name to see if it is tar/jar file.
if jar_type:
return submit_fatjar(cl_args, unknown_args, tmp_dir)
elif tar_type:
return submit_tar(cl_args, unknown_args, tmp_dir)
elif pex_type:
return submit_pex(cl_args, unknown_args, tmp_dir)
return False
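# Illustrative invocation (hypothetical paths and names), assuming the heron
# CLI dispatches to run() above:
#
#   heron submit local/ads/devel ~/topologies/word-count.jar \
#       com.example.WordCountTopology WordCountTopology
#
# The .jar extension routes the submission through submit_fatjar(); a .tar.gz
# or .pex file would go through submit_tar() or submit_pex() instead.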
|
|
'''
nlp: part of the wordfish python package, for extracting relationships of terms from a corpus.
Functions for simple natural language processing.
Copyright (c) 2015-2018 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to
do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from wordfish.terms import check_nltk
from textblob import TextBlob, Word
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import nltk.data
import pandas
import gensim
import re
# Ensure the required nltk data has been downloaded
check_nltk()
def remove_nonenglish_chars(text):
return re.sub("[^a-zA-Z]", " ", text)
def text2sentences(text, remove_non_english_chars=True):
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
if remove_non_english_chars:
text = remove_nonenglish_chars(text)
for s in tokenizer.tokenize(text):
yield s
def equation2tokens(tex):
    '''walk through a LaTeX string and grab chunks that correspond to known
    identifiers, meaning anything that starts with \ and is terminated by a
    run of word characters, an opening brace or parenthesis, an underscore,
    or a ^ (the terminator is included in the token).
    '''
regexp = r'\\(.*?)(\w+|\{|\(|\_|\^)'
tokens = []
while re.search(regexp, tex) and len(tex) > 0:
match = re.search(regexp, tex)
# Only take the chunk if it's starting at 0
if match.start() == 0:
tokens.append(tex[match.start():match.end()])
# And update the string
tex = tex[match.end():]
# Otherwise, add the next character to the tokens list
else:
tokens.append(tex[0])
tex = tex[1:]
# When we get down here, the regexp doesn't match anymore! Add remaining
if len(tex) > 0:
tokens = tokens + [t for t in tex]
return tokens
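# Illustrative example (not from the original source): tokenizing a small
# LaTeX fragment captures the identifier first, then falls back to
# per-character tokens for the remainder:
#
#   >>> equation2tokens(r"\alpha^2")
#   ['\\alpha', '^', '2']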
def processText(text):
'''combines text2sentences and sentence2words
Parameters
==========
text: the raw string of text to process
'''
vector = []
for line in text2sentences(text):
words = sentence2words(line)
vector = vector + words
return vector
def sentence2words(sentence,remove_stop_words=True,lower=True):
    if isinstance(sentence, (list, tuple)):
        sentence = sentence[0]
    re_white_space = re.compile(r"\s+")
stop_words = set(stopwords.words("english"))
# The user wants to make all letters lowercase
if lower:
sentence = sentence.lower()
words = re_white_space.split(sentence.strip())
# Remove stop words
if remove_stop_words:
words = [w for w in words if w not in stop_words]
return words
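# Illustrative example (not from the original source): with the defaults the
# sentence is lowercased, whitespace-split, and NLTK English stop words
# ("the", "on") are dropped:
#
#   >>> sentence2words("The cat sat on the mat")
#   ['cat', 'sat', 'mat']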
def do_stem(words,return_unique=True,remove_non_english_words=True):
    '''do_stem
    Parameters
    ==========
    words: str/list
        one or more words to be stemmed
    return_unique: boolean (default True)
        return unique terms
    remove_non_english_words: boolean (default True)
        replace non-alphabetic characters with spaces before stemming
    '''
stemmer = PorterStemmer()
if isinstance(words,str):
words = [words]
stems = []
for word in words:
if remove_non_english_words:
word = re.sub("[^a-zA-Z]", " ", word)
stems.append(stemmer.stem(word))
if return_unique:
return list(set([s.lower() for s in stems]))
else:
return stems
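# Illustrative example (not from the original source): both inflections stem
# to the same root, and return_unique=True (the default) collapses them:
#
#   >>> do_stem(["running", "runs"])
#   ['run']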
def get_total_words(text):
'''get_total_words:
get total words in a text (dict, string, or list)
Parameters
==========
text: str,dict,list
some text content to parse to count total words
Returns
=======
totalwords: int
total count of words
'''
totalwords = 0
# Dictionary
if isinstance(text,dict):
for label,sentences in text.items():
if isinstance(sentences,str):
sentences = [sentences]
for sentence in sentences:
blob = TextBlob(sentence)
words = do_stem(blob.words)
totalwords += len(words)
return totalwords
# String or list
elif isinstance(text,str):
text = [text]
for sentence in text:
blob = TextBlob(sentence)
words = do_stem(blob.words)
totalwords += len(words)
return totalwords
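# Illustrative example (not from the original source): because do_stem
# deduplicates per sentence, repeated words within one sentence count once:
#
#   >>> get_total_words("the cat sat the cat")
#   3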
def get_term_counts(terms,text):
    '''get_term_counts:
    a wrapper for get_term_counts_dict and get_term_counts_list
    will return a pandas data frame of counts for a list of terms of interest
    Parameters
    ==========
    text: dict,list,str
        some text content to parse to count a number of terms
    terms: str,list
        one or more terms to be stemmed and counted in the text
    Returns
    =======
    counts: pandas.DataFrame
        counts of each stemmed term found in the text
    '''
    if isinstance(text, dict):
        return get_term_counts_dict(terms, text)
    if isinstance(text, str):
        text = [text]
    return get_term_counts_list(terms, text)
def get_term_counts_list(terms,text):
# Convert words into stems
stems = do_stem(terms)
    # data frame to hold counts
counts = pandas.DataFrame(0,columns=["count"],index=stems)
for sentence in text:
blob = TextBlob(sentence)
words = do_stem(blob.words)
words = [w for w in words if w in stems]
counts.loc[words] = counts.loc[words] + 1
return counts
def get_term_counts_dict(terms,text):
# Convert words into stems
stems = do_stem(terms)
    # data frame to hold counts
counts = pandas.DataFrame(0,columns=["count"],index=stems)
for label,sentences in text.items():
if isinstance(sentences,str):
sentences = [sentences]
for sentence in sentences:
blob = TextBlob(sentence)
words = do_stem(blob.words)
words = [w for w in words if w in stems]
counts.loc[words] = counts.loc[words] + 1
return counts
# Return list of stemmed phrases
def stem_phrases(phrases):
stemmed = []
for phrase in phrases:
phrase = phrase.split(" ")
if isinstance(phrase,str):
phrase = [phrase]
single_stemmed = do_stem(phrase)
stemmed.append(" ".join(single_stemmed).encode("utf-8"))
return stemmed
def get_match(phrasematch,entirephrase,found_indices):
'''
get_match:
Function to get a match: start, length, text, from a sentence
Returns dictionary with:
start_index
length
text
found_indices: updated binary [0,1] list of found indices in entirephrase
'''
full_concept = phrasematch.split(" ")
foundmatch = True
# We should not find words that have already been found :)
findices = [i for i in range(0,len(found_indices)) if found_indices[i] == 1]
for found_index in findices:
entirephrase[found_index] = "XXXXXXXXXXXXXXXX"
indices = []
for word in full_concept:
if word in entirephrase:
indices.append(entirephrase.index(word))
# Missing any one word, not a match
else:
foundmatch = False
if len(set(indices)) == len(full_concept):
for i in range(0,len(indices)-1):
# Not in chronological order +1, not a match
if indices[i]+1 != indices[i+1]:
foundmatch=False
# Missing any one word, not a match
else:
foundmatch = False
if foundmatch == True:
start_index = entirephrase.index(full_concept[0])
length = len(full_concept)
text = entirephrase[start_index:start_index+length]
# Update found indices
        found_indices[start_index:start_index+length] = [1] * length
else:
start_index = 0
length = 0
text = ""
result = {"start_index":start_index,
"length":length,
"text":text,
"found_indices":found_indices}
return result
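# Illustrative example (not from the original source): a two-word phrase that
# appears consecutively is reported with its start and length, and the found
# indices are marked so later calls skip those words:
#
#   >>> get_match("big cat", ["the", "big", "cat", "sat"], [0, 0, 0, 0])
#   {'start_index': 1, 'length': 2, 'text': ['big', 'cat'],
#    'found_indices': [0, 1, 1, 0]}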
def find_phrases(words,vocabulary,repeat=1):
'''
words: a list of words
vocabulary: a list of words / phrases to find in the words
repeat: the number of times to run over the phrase
(in case of repeats of same in one sentence)
Returns:
(words_index,vocab_index,word,vocab)
'''
    vocabulary = list(set(vocabulary))
    # We will stem phrases, and search for them across the stemmed words
    # (everything is kept as str so the joins and lookups below work in Python 3)
    vocab_stemmed = stem_phrases(vocabulary)
    stemmed = do_stem(words, return_unique=False)
# Make a long regular expression
regexp = "*|".join(vocab_stemmed) + "*"
phrases = []
# Make lookups to return to original vocab and terms
vocab_lookup = make_lookup(vocabulary,vocab_stemmed)
words_lookup = make_lookup(words,stemmed)
    # We run multiple passes (repeat) in case the same phrase occurs more than once
for r in range(0,repeat):
# Search the sentence for any concepts:
if re.search(regexp," ".join(stemmed)):
for c in range(0,len(stemmed)):
for v in range(len(vocab_stemmed)):
single_stemmed = vocab_stemmed[v]
if re.match("%s" %(stemmed[c]),single_stemmed):
phrases.append((c, v, words_lookup[stemmed[c]],vocab_lookup[vocab_stemmed[v]]))
return phrases
def make_lookup(original_list,new_list):
lookup = dict()
for x in range(len(new_list)):
lookup[new_list[x]] = original_list[x]
return lookup
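# Illustrative example (not from the original source): both the words and the
# vocabulary are stemmed before matching, and the returned tuples map the
# stemmed hits back to the originals:
#
#   >>> find_phrases(["running", "tests"], ["run"])
#   [(0, 0, 'running', 'run')]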
|
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import mock
import six
import sys
from keystoneauth1 import fixture
import requests
AUTH_TOKEN = "foobar"
AUTH_URL = "http://0.0.0.0"
USERNAME = "itchy"
PASSWORD = "scratchy"
PROJECT_NAME = "poochie"
REGION_NAME = "richie"
INTERFACE = "catchy"
VERSION = "3"
TEST_RESPONSE_DICT = fixture.V2Token(token_id=AUTH_TOKEN,
user_name=USERNAME)
_s = TEST_RESPONSE_DICT.add_service('identity', name='keystone')
_s.add_endpoint(AUTH_URL + ':5000/v2.0')
_s = TEST_RESPONSE_DICT.add_service('network', name='neutron')
_s.add_endpoint(AUTH_URL + ':9696')
_s = TEST_RESPONSE_DICT.add_service('compute', name='nova')
_s.add_endpoint(AUTH_URL + ':8774/v2')
_s = TEST_RESPONSE_DICT.add_service('image', name='glance')
_s.add_endpoint(AUTH_URL + ':9292')
_s = TEST_RESPONSE_DICT.add_service('object', name='swift')
_s.add_endpoint(AUTH_URL + ':8080/v1')
TEST_RESPONSE_DICT_V3 = fixture.V3Token(user_name=USERNAME)
TEST_RESPONSE_DICT_V3.set_project_scope()
TEST_VERSIONS = fixture.DiscoveryList(href=AUTH_URL)
class FakeStdout(object):
def __init__(self):
self.content = []
def write(self, text):
self.content.append(text)
    def make_string(self):
        return ''.join(self.content)
class FakeLog(object):
def __init__(self):
self.messages = {}
def debug(self, msg):
self.messages['debug'] = msg
def info(self, msg):
self.messages['info'] = msg
def warning(self, msg):
self.messages['warning'] = msg
def error(self, msg):
self.messages['error'] = msg
def critical(self, msg):
self.messages['critical'] = msg
class FakeApp(object):
    def __init__(self, _stdout, _log):
        self.client_manager = None
        self.stdin = sys.stdin
        self.stdout = _stdout or sys.stdout
        self.stderr = sys.stderr
        self.log = _log
class FakeOptions(object):
def __init__(self, **kwargs):
self.os_beta_command = False
class FakeClient(object):
def __init__(self, **kwargs):
self.endpoint = kwargs['endpoint']
self.token = kwargs['token']
class FakeClientManager(object):
_api_version = {
'image': '2',
}
def __init__(self):
self.compute = None
self.identity = None
self.image = None
self.object_store = None
self.volume = None
self.network = None
self.session = None
self.auth_ref = None
self.auth_plugin_name = None
self.network_endpoint_enabled = True
def get_configuration(self):
return {
'auth': {
'username': USERNAME,
'password': PASSWORD,
'token': AUTH_TOKEN,
},
'region': REGION_NAME,
'identity_api_version': VERSION,
}
def is_network_endpoint_enabled(self):
return self.network_endpoint_enabled
class FakeModule(object):
def __init__(self, name, version):
self.name = name
self.__version__ = version
class FakeResource(object):
def __init__(self, manager=None, info=None, loaded=False, methods=None):
"""Set attributes and methods for a resource.
:param manager:
The resource manager
:param Dictionary info:
A dictionary with all attributes
:param bool loaded:
True if the resource is loaded in memory
:param Dictionary methods:
A dictionary with all methods
"""
info = info or {}
methods = methods or {}
self.__name__ = type(self).__name__
self.manager = manager
self._info = info
self._add_details(info)
self._add_methods(methods)
self._loaded = loaded
def _add_details(self, info):
for (k, v) in six.iteritems(info):
setattr(self, k, v)
def _add_methods(self, methods):
"""Fake methods with MagicMock objects.
For each <@key, @value> pairs in methods, add an callable MagicMock
object named @key as an attribute, and set the mock's return_value to
@value. When users access the attribute with (), @value will be
returned, which looks like a function call.
"""
for (name, ret) in six.iteritems(methods):
method = mock.MagicMock(return_value=ret)
setattr(self, name, method)
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def keys(self):
return self._info.keys()
@property
def info(self):
return self._info
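# Illustrative usage (hypothetical values): FakeResource builds a stand-in API
# object from plain dictionaries, e.g.
#
#   resource = FakeResource(info={'id': 'abc'}, methods={'delete': True})
#   resource.id          # -> 'abc'
#   resource.delete()    # -> True (a MagicMock return_value)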
class FakeResponse(requests.Response):
def __init__(self, headers=None, status_code=200,
data=None, encoding=None):
super(FakeResponse, self).__init__()
headers = headers or {}
self.status_code = status_code
self.headers.update(headers)
self._content = json.dumps(data)
if not isinstance(self._content, six.binary_type):
self._content = self._content.encode()
class FakeModel(dict):
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
from scipy.stats import \
binned_statistic, binned_statistic_2d, binned_statistic_dd
class TestBinnedStatistic(object):
@classmethod
def setup_class(cls):
np.random.seed(9865)
cls.x = np.random.random(100)
cls.y = np.random.random(100)
cls.v = np.random.random(100)
cls.X = np.random.random((100, 3))
def test_1d_count(self):
x = self.x
v = self.v
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
count2, edges2 = np.histogram(x, bins=10)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_1d_sum(self):
x = self.x
v = self.v
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
sum2, edges2 = np.histogram(x, bins=10, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_1d_mean(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_std(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_median(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_bincode(self):
x = self.x[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
1, 2, 1])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
assert_array_almost_equal(bcount, count1)
def test_2d_count(self):
x = self.x
y = self.y
v = self.v
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=5)
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_sum(self):
x = self.x
y = self.y
v = self.v
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_mean(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_std(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_median(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'median', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.median, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_bincode(self):
x = self.x[:20]
y = self.y[:20]
v = self.v[:20]
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=3)
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
6, 11, 16, 6, 6, 11, 8])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
def test_dd_count(self):
X = self.X
v = self.v
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
count2, edges2 = np.histogramdd(X, bins=3)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_dd_sum(self):
X = self.X
v = self.v
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_dd_mean(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_std(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_median(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_bincode(self):
X = self.X[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
32, 36, 91, 43, 87, 81, 81])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
if __name__ == "__main__":
run_module_suite()
|
|
"""
dj-stripe Charge Model Tests.
"""
from copy import deepcopy
from decimal import Decimal
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test.testcases import TestCase
from djstripe.enums import ChargeStatus, LegacySourceType
from djstripe.models import (
Account, Charge, Dispute, DjstripePaymentMethod, PaymentMethod
)
from . import (
FAKE_ACCOUNT, FAKE_BALANCE_TRANSACTION, FAKE_BALANCE_TRANSACTION_REFUND, FAKE_CHARGE,
FAKE_CHARGE_REFUNDED, FAKE_CUSTOMER, FAKE_FILEUPLOAD, FAKE_INVOICE, FAKE_REFUND,
FAKE_SUBSCRIPTION, FAKE_TRANSFER, AssertStripeFksMixin, default_account
)
class ChargeTest(AssertStripeFksMixin, TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="user", email="user@example.com"
)
self.customer = FAKE_CUSTOMER.create_for_user(self.user)
self.account = default_account()
def test_str(self):
charge = Charge(
amount=50,
currency="usd",
id="ch_test",
status=ChargeStatus.failed,
captured=False,
paid=False,
)
self.assertEqual(str(charge), "$50.00 USD (Uncaptured)")
charge.captured = True
self.assertEqual(str(charge), "$50.00 USD (Failed)")
charge.status = ChargeStatus.succeeded
charge.dispute = Dispute()
self.assertEqual(str(charge), "$50.00 USD (Disputed)")
charge.dispute = None
charge.refunded = True
charge.amount_refunded = 50
self.assertEqual(str(charge), "$50.00 USD (Refunded)")
charge.refunded = False
self.assertEqual(str(charge), "$50.00 USD (Partially refunded)")
charge.amount_refunded = 0
self.assertEqual(str(charge), "$50.00 USD")
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Charge.retrieve")
def test_capture_charge(self, charge_retrieve_mock, default_account_mock):
default_account_mock.return_value = self.account
fake_charge_no_invoice = deepcopy(FAKE_CHARGE)
fake_charge_no_invoice.update({"invoice": None})
charge_retrieve_mock.return_value = fake_charge_no_invoice
charge, created = Charge._get_or_create_from_stripe_object(fake_charge_no_invoice)
self.assertTrue(created)
captured_charge = charge.capture()
self.assertTrue(captured_charge.captured)
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.invoice",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Charge.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
def test_sync_from_stripe_data(
self,
subscription_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
balance_transaction_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE)
fake_charge_copy.update({"application_fee": {"amount": 0}})
charge = Charge.sync_from_stripe_data(fake_charge_copy)
self.assertEqual(Decimal("22"), charge.amount)
self.assertEqual(True, charge.paid)
self.assertEqual(False, charge.refunded)
self.assertEqual(True, charge.captured)
self.assertEqual(False, charge.disputed)
self.assertEqual("VideoDoc consultation for ivanp0001 berkp0001", charge.description)
self.assertEqual(0, charge.amount_refunded)
self.assertEqual("card_16YKQh2eZvKYlo2Cblc5Feoo", charge.source_id)
self.assertEqual(charge.source.type, LegacySourceType.card)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Charge.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
def test_sync_from_stripe_data_refunded_on_update(
self,
subscription_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
default_account_mock,
):
# first sync charge (as per test_sync_from_stripe_data) then sync refunded version,
# to hit the update code-path instead of insert
from djstripe.settings import STRIPE_SECRET_KEY
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE)
with patch(
"stripe.BalanceTransaction.retrieve", return_value=deepcopy(FAKE_BALANCE_TRANSACTION)
):
charge = Charge.sync_from_stripe_data(fake_charge_copy)
self.assertEqual(Decimal("22"), charge.amount)
self.assertEqual(True, charge.paid)
self.assertEqual(False, charge.refunded)
self.assertEqual(True, charge.captured)
self.assertEqual(False, charge.disputed)
self.assertEqual(len(charge.refunds.all()), 0)
fake_charge_refunded_copy = deepcopy(FAKE_CHARGE_REFUNDED)
with patch(
"stripe.BalanceTransaction.retrieve",
return_value=deepcopy(FAKE_BALANCE_TRANSACTION_REFUND),
) as balance_transaction_retrieve_mock:
charge_refunded = Charge.sync_from_stripe_data(fake_charge_refunded_copy)
self.assertEqual(charge.id, charge_refunded.id)
self.assertEqual(Decimal("22"), charge_refunded.amount)
self.assertEqual(True, charge_refunded.paid)
self.assertEqual(True, charge_refunded.refunded)
self.assertEqual(True, charge_refunded.captured)
self.assertEqual(False, charge_refunded.disputed)
self.assertEqual(
"VideoDoc consultation for ivanp0001 berkp0001", charge_refunded.description
)
self.assertEqual(charge_refunded.amount, charge_refunded.amount_refunded)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_called_once_with(
api_key=STRIPE_SECRET_KEY, expand=[], id=FAKE_BALANCE_TRANSACTION_REFUND["id"]
)
refunds = list(charge_refunded.refunds.all())
self.assertEqual(len(refunds), 1)
refund = refunds[0]
self.assertEqual(refund.id, FAKE_REFUND["id"])
self.assertNotEqual(
charge_refunded.balance_transaction.id, refund.balance_transaction.id
)
self.assertEqual(
charge_refunded.balance_transaction.id, FAKE_BALANCE_TRANSACTION["id"]
)
self.assertEqual(refund.balance_transaction.id, FAKE_BALANCE_TRANSACTION_REFUND["id"])
self.assert_fks(
charge_refunded,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("djstripe.models.Account.get_default_account")
@patch(
"stripe.BalanceTransaction.retrieve",
return_value=deepcopy(FAKE_BALANCE_TRANSACTION_REFUND),
)
@patch("stripe.Charge.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
def test_sync_from_stripe_data_refunded(
self,
subscription_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
balance_transaction_retrieve_mock,
default_account_mock,
):
from djstripe.settings import STRIPE_SECRET_KEY
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE_REFUNDED)
charge = Charge.sync_from_stripe_data(fake_charge_copy)
self.assertEqual(Decimal("22"), charge.amount)
self.assertEqual(True, charge.paid)
self.assertEqual(True, charge.refunded)
self.assertEqual(True, charge.captured)
self.assertEqual(False, charge.disputed)
self.assertEqual("VideoDoc consultation for ivanp0001 berkp0001", charge.description)
self.assertEqual(charge.amount, charge.amount_refunded)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_called_once_with(
api_key=STRIPE_SECRET_KEY, expand=[], id=FAKE_BALANCE_TRANSACTION_REFUND["id"]
)
refunds = list(charge.refunds.all())
self.assertEqual(len(refunds), 1)
refund = refunds[0]
self.assertEqual(refund.id, FAKE_REFUND["id"])
self.assertNotEqual(charge.balance_transaction.id, refund.balance_transaction.id)
self.assertEqual(charge.balance_transaction.id, FAKE_BALANCE_TRANSACTION["id"])
self.assertEqual(refund.balance_transaction.id, FAKE_BALANCE_TRANSACTION_REFUND["id"])
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Charge.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("djstripe.models.Account.get_default_account")
def test_sync_from_stripe_data_max_amount(
self,
default_account_mock,
subscription_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
balance_transaction_retrieve_mock,
):
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE)
# https://support.stripe.com/questions/what-is-the-maximum-amount-i-can-charge-with-stripe
fake_charge_copy.update({"amount": 99999999})
charge = Charge.sync_from_stripe_data(fake_charge_copy)
self.assertEqual(Decimal("999999.99"), charge.amount)
self.assertEqual(True, charge.paid)
self.assertEqual(False, charge.refunded)
self.assertEqual(True, charge.captured)
self.assertEqual(False, charge.disputed)
self.assertEqual(0, charge.amount_refunded)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Charge.retrieve")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
def test_sync_from_stripe_data_unsupported_source(
self,
invoice_retrieve_mock,
subscription_retrieve_mock,
charge_retrieve_mock,
balance_transaction_retrieve_mock,
default_account_mock,
):
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE)
fake_charge_copy.update({"source": {"id": "test_id", "object": "unsupported"}})
charge = Charge.sync_from_stripe_data(fake_charge_copy)
self.assertEqual("test_id", charge.source_id)
self.assertEqual("unsupported", charge.source.type)
self.assertEqual(charge.source, DjstripePaymentMethod.objects.get(id="test_id"))
# alias to old model name should work the same
self.assertEqual(charge.source, PaymentMethod.objects.get(id="test_id"))
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Charge.retrieve")
def test_sync_from_stripe_data_no_customer(
self, charge_retrieve_mock, balance_transaction_retrieve_mock, default_account_mock
):
default_account_mock.return_value = self.account
fake_charge_copy = deepcopy(FAKE_CHARGE)
fake_charge_copy.pop("customer", None)
# remove invoice since it requires a customer
fake_charge_copy.pop("invoice", None)
Charge.sync_from_stripe_data(fake_charge_copy)
assert Charge.objects.count() == 1
charge = Charge.objects.get()
assert charge.customer is None
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.customer",
"djstripe.Charge.dispute",
"djstripe.Charge.invoice",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Charge.retrieve")
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Transfer.retrieve")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("djstripe.models.Account.get_default_account")
def test_sync_from_stripe_data_with_transfer(
self,
default_account_mock,
subscription_retrieve_mock,
transfer_retrieve_mock,
invoice_retrieve_mock,
charge_retrieve_mock,
balance_transaction_retrieve_mock,
):
default_account_mock.return_value = self.account
fake_transfer = deepcopy(FAKE_TRANSFER)
fake_charge_copy = deepcopy(FAKE_CHARGE)
fake_charge_copy.update({"transfer": fake_transfer["id"]})
transfer_retrieve_mock.return_value = fake_transfer
charge_retrieve_mock.return_value = fake_charge_copy
charge, created = Charge._get_or_create_from_stripe_object(
fake_charge_copy, current_ids={fake_charge_copy["id"]}
)
self.assertTrue(created)
self.assertNotEqual(None, charge.transfer)
self.assertEqual(fake_transfer["id"], charge.transfer.id)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Account.business_logo",
"djstripe.Charge.dispute",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
@patch("stripe.Charge.retrieve")
@patch("stripe.Account.retrieve")
@patch("stripe.BalanceTransaction.retrieve")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.File.retrieve", return_value=deepcopy(FAKE_FILEUPLOAD))
def test_sync_from_stripe_data_with_destination(
self,
        file_retrieve_mock,
invoice_retrieve_mock,
subscription_retrieve_mock,
balance_transaction_retrieve_mock,
account_retrieve_mock,
charge_retrieve_mock,
):
account_retrieve_mock.return_value = FAKE_ACCOUNT
fake_charge_copy = deepcopy(FAKE_CHARGE)
fake_charge_copy.update({"destination": FAKE_ACCOUNT["id"]})
charge, created = Charge._get_or_create_from_stripe_object(
fake_charge_copy, current_ids={fake_charge_copy["id"]}
)
self.assertTrue(created)
self.assertEqual(2, Account.objects.count())
account = Account.objects.get(id=FAKE_ACCOUNT["id"])
self.assertEqual(account, charge.account)
charge_retrieve_mock.assert_not_called()
balance_transaction_retrieve_mock.assert_not_called()
self.assert_fks(
charge,
expected_blank_fks={
"djstripe.Charge.dispute",
"djstripe.Charge.transfer",
"djstripe.Customer.coupon",
"djstripe.Plan.product",
},
)
|
|
import unittest
import pprint
import jsonschema
from scenario.player import play_scenario
from scenario.tests.consts import EXECUTABLE, DIALOUGE_PIECES
from scenario.consts import SCENARIO_JSON_SCHEMA
class PlayerTest(unittest.TestCase):
def _generate_scenario(self, name, args, dialogue, strictness=False):
scenario = {
'id': name,
'name': name,
'description': name,
'flow': True,
'strictness': strictness,
'timeout': 1,
'verbosity': 4,
'args': args,
'dialogue': dialogue
}
return scenario
def _run_test(self, result_bool, feedback_type, args, dialogue, **kwargs):
name = '='.join([str(result_bool),
str(feedback_type),
'+'.join(args)])
scenario = self._generate_scenario(name, args, dialogue, **kwargs)
jsonschema.validate(scenario, SCENARIO_JSON_SCHEMA)
feedback = play_scenario(scenario, EXECUTABLE)
print('args:')
pprint.pprint(args)
        print()
        print('dialogue:')
        pprint.pprint(dialogue)
        print()
pprint.pprint(feedback['log'])
self.assertEqual(feedback['result']['bool'], result_bool)
self.assertEqual(feedback['feedback']['type'], feedback_type)
return feedback
class ResultTrueTests(PlayerTest):
def test_all_print(self):
'''
Working: Empty Scenario
Empty `dialogue` with only print executable
'''
dialogue = []
args = ['print']
self._run_test(True, None, args, dialogue)
def test_all_output(self):
'''
Working: All Output
`dialogue` contains all output of executable
'''
dialogue = DIALOUGE_PIECES['output_all']
args = ['print']
self._run_test(True, None, args, dialogue)
def test_one_output(self):
'''
Working: One Output
`dialogue` contains one output of executable
'''
dialogue = [DIALOUGE_PIECES['output4']]
args = ['print']
self._run_test(True, None, args, dialogue)
def test_one_output_input_output(self):
'''
Working: One Output -> Input -> Output (flow=True)
`dialogue` contains one output line of executable
and then input and output
'''
dialogue = [DIALOUGE_PIECES['output4'],
DIALOUGE_PIECES['input_comment'],
DIALOUGE_PIECES['output_comment']]
args = ['print', 'input', 'output']
self._run_test(True, None, args, dialogue)
def test_input_one_output_output(self):
'''
Working: Input -> One Output -> Output (flow=True)
`dialogue` contains one output line of executable
but before input and after output
'''
dialogue = [DIALOUGE_PIECES['input_comment'],
DIALOUGE_PIECES['output4'],
DIALOUGE_PIECES['output_comment']]
args = ['input', 'print', 'output']
self._run_test(True, None, args, dialogue)
class StrictnessTests(PlayerTest):
def _run_strictness_test(self, result_bool, feedback_type,
sglobal, slocal=None):
'''
Helper method: One Output Upper (strictness: sglobal & slocal)
`dialogue` contains upper case one output of executable
'''
quote = DIALOUGE_PIECES['output4_upper'].copy()
if slocal is not None:
quote['strictness'] = slocal
dialogue = [quote]
args = ['print']
self._run_test(result_bool, feedback_type, args, dialogue,
strictness=sglobal)
def test_one_output_upper_strictness_global_false(self):
'''
Working: One Output Upper (strictness: global=False)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(True, None,
sglobal=False)
def test_one_output_upper_strictness_global_true(self):
'''
Not Working: One Output Upper (strictness: global=True)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(False, 'ShouldOutputBeforeEOF',
sglobal=True)
def test_one_output_upper_strictness_local_false_global_false(self):
'''
Working: One Output Upper (strictness: local=False, global=False)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(True, None,
sglobal=False,
slocal=False)
def test_one_output_upper_strictness_local_true_global_false(self):
'''
Not Working: One Output Upper (strictness: local=True, global=False)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(False, 'ShouldOutputBeforeEOF',
sglobal=False,
slocal=True)
def test_one_output_upper_strictness_local_false_global_true(self):
'''
Working: One Output Upper (strictness: local=False, global=True)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(True, None,
sglobal=True,
slocal=False)
def test_one_output_upper_strictness_local_true_global_true(self):
'''
Not Working: One Output Upper (strictness: local=True, global=True)
`dialogue` contains upper case one output of executable
'''
self._run_strictness_test(False, 'ShouldOutputBeforeEOF',
sglobal=True,
slocal=True)
class ResultFalseTests(PlayerTest):
def test_ShouldOutput(self):
'''
Feedback Error: Output Incorrect
'''
dialogue = [DIALOUGE_PIECES['output_poet']]
args = ['print', 'input']
self._run_test(False, 'ShouldOutput', args, dialogue)
def test_ShouldEOF(self):
'''
Feedback Error: Should EOF
'''
dialogue = [DIALOUGE_PIECES['output4']]
args = ['print', 'input']
self._run_test(False, 'ShouldEOF', args, dialogue)
def test_ShouldOutputBeforeEOF(self):
'''
        Feedback Error: Should Output Before EOF
'''
dialogue = [DIALOUGE_PIECES['output4'],
DIALOUGE_PIECES['output_poet']]
args = ['print']
self._run_test(False, 'ShouldOutputBeforeEOF', args, dialogue)
    def test_ShouldNoOutputBeforeInput(self):
'''
Feedback Error: Should No Output Before Input (flow=False)
'''
raise unittest.SkipTest
def test_ShouldInputBeforeEOF(self):
'''
Feedback Error: Should Input Before EOF
'''
dialogue = [DIALOUGE_PIECES['output4'],
DIALOUGE_PIECES['input_comment']]
args = ['print']
self._run_test(False, 'ShouldInputBeforeEOF', args, dialogue)
def test_MemoryFeedbackError(self):
pass
class MemoryTests(PlayerTest):
def test_MemoryFeedback(self):
'''
Feedback Error: MemoryFeedback
'''
dialogue = [DIALOUGE_PIECES['output_poet'],
DIALOUGE_PIECES['input_comment']]
args = ['print', 'crash', 'input']
self._run_test(False, 'MemoryFeedbackError', args, dialogue)
class LogTests(PlayerTest):
def test_log_break_lines(self):
'''
Log: Break Lines
The \\r\\n should be added only for the last log quote in the line
'''
dialogue = [DIALOUGE_PIECES['output4_middle']]
args = ['print']
feedback = self._run_test(True, None, args, dialogue)
self.assertEqual(feedback['log']['quotes'][4]['value'],
DIALOUGE_PIECES['output4_prefix']['value'])
self.assertEqual(feedback['log']['quotes'][5]['value'],
DIALOUGE_PIECES['output4_middle']['value'])
self.assertEqual(feedback['log']['quotes'][6]['value'],
DIALOUGE_PIECES['output4_suffix']['value'] + '\r\n')
|
|
"""
A :class:`FieldData` is used by :class:`~xblock.core.XBlock` to read and write
data to particular scoped fields by name. This allows individual runtimes to
provide varied persistence backends while keeping the API used by the `XBlock`
simple.
"""
import copy
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from xblock.exceptions import InvalidScopeError
class FieldData(metaclass=ABCMeta):
"""
An interface allowing access to an XBlock's field values indexed by field names.
"""
@abstractmethod
def get(self, block, name):
"""
Retrieve the value for the field named `name` for the XBlock `block`.
If no value is set, raise a `KeyError`.
The value returned may be mutated without modifying the backing store.
:param block: block to inspect
:type block: :class:`~xblock.core.XBlock`
:param name: field name to look up
:type name: str
"""
raise NotImplementedError
@abstractmethod
def set(self, block, name, value):
"""
Set the value of the field named `name` for XBlock `block`.
`value` may be mutated after this call without affecting the backing store.
:param block: block to modify
:type block: :class:`~xblock.core.XBlock`
:param name: field name to set
:type name: str
:param value: value to set
"""
raise NotImplementedError
@abstractmethod
def delete(self, block, name):
"""
Reset the value of the field named `name` to the default for XBlock `block`.
:param block: block to modify
:type block: :class:`~xblock.core.XBlock`
:param name: field name to delete
:type name: str
"""
raise NotImplementedError
def has(self, block, name):
"""
Return whether or not the field named `name` has a non-default value for the XBlock `block`.
:param block: block to check
:type block: :class:`~xblock.core.XBlock`
:param name: field name
:type name: str
"""
try:
self.get(block, name)
return True
except KeyError:
return False
def set_many(self, block, update_dict):
"""
Update many fields on an XBlock simultaneously.
:param block: the block to update
:type block: :class:`~xblock.core.XBlock`
:param update_dict: A map of field names to their new values
:type update_dict: dict
"""
for key, value in update_dict.items():
self.set(block, key, value)
def default(self, block, name):
"""
Get the default value for this field which may depend on context or may just be the field's global
default. The default behavior is to raise KeyError which will cause the caller to return the field's
global default.
:param block: the block containing the field being defaulted
:type block: :class:`~xblock.core.XBlock`
:param name: the field's name
:type name: `str`
"""
raise KeyError(repr(name))
class DictFieldData(FieldData):
"""
A FieldData that uses a single supplied dictionary to store fields by name.
"""
def __init__(self, data):
self._data = data
def get(self, block, name):
return copy.deepcopy(self._data[name])
def set(self, block, name, value):
self._data[name] = copy.deepcopy(value)
def delete(self, block, name):
del self._data[name]
def has(self, block, name):
return name in self._data
def set_many(self, block, update_dict):
self._data.update(copy.deepcopy(update_dict))
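# Illustrative usage (hypothetical block object): DictFieldData is convenient
# in tests, where a plain dict stands in for a real persistence backend:
#
#   field_data = DictFieldData({'display_name': 'demo'})
#   field_data.get(block, 'display_name')   # -> 'demo' (a deep copy)
#   field_data.set(block, 'count', 3)       # stores a deep copy of the value
#   field_data.has(block, 'missing')        # -> False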
class SplitFieldData(FieldData):
"""
    A FieldData that divides particular scopes among
    several backing FieldData objects.
"""
def __init__(self, scope_mappings):
"""
`scope_mappings` defines :class:`~xblock.field_data.FieldData` objects to use
for each scope. If a scope is not a key in `scope_mappings`, then using
a field of that scope will raise an :class:`~xblock.exceptions.InvalidScopeError`.
:param scope_mappings: A map from Scopes to backing FieldData instances
:type scope_mappings: `dict` of :class:`~xblock.fields.Scope` to :class:`~xblock.field_data.FieldData`
"""
self._scope_mappings = scope_mappings
def _field_data(self, block, name):
"""Return the field data for the field `name` on the :class:`~xblock.core.XBlock` `block`"""
scope = block.fields[name].scope
if scope not in self._scope_mappings:
raise InvalidScopeError(scope)
return self._scope_mappings[scope]
def get(self, block, name):
return self._field_data(block, name).get(block, name)
def set(self, block, name, value):
self._field_data(block, name).set(block, name, value)
def set_many(self, block, update_dict):
update_dicts = defaultdict(dict)
for key, value in update_dict.items():
update_dicts[self._field_data(block, key)][key] = value
for field_data, new_update_dict in update_dicts.items():
field_data.set_many(block, new_update_dict)
def delete(self, block, name):
self._field_data(block, name).delete(block, name)
def has(self, block, name):
return self._field_data(block, name).has(block, name)
def default(self, block, name):
return self._field_data(block, name).default(block, name)
def save_block(self, block):
""" saving data """
field_datas = set(self._scope_mappings.values())
for field_data in field_datas:
field_data.save_block(block)
class ReadOnlyFieldData(FieldData):
"""
    A FieldData that wraps another FieldData and makes all calls to set and delete
    raise :class:`~xblock.exceptions.InvalidScopeError`.
"""
def __init__(self, source):
self._source = source
def get(self, block, name):
return self._source.get(block, name)
def set(self, block, name, value):
raise InvalidScopeError(f"{block}.{name} is read-only, cannot set")
def delete(self, block, name):
raise InvalidScopeError(f"{block}.{name} is read-only, cannot delete")
def has(self, block, name):
return self._source.has(block, name)
def default(self, block, name):
return self._source.default(block, name)
def __repr__(self):
return f"ReadOnlyFieldData({self._source!r})"
|
|
import numpy as np
import fitsio
from astrometry.util.fits import fits_table
from astrometry.util.resample import resample_with_wcs, OverlapError
from legacypipe.bits import DQ_BITS
from legacypipe.survey import tim_get_resamp
from legacypipe.utils import copy_header_with_wcs
import logging
logger = logging.getLogger('legacypipe.coadds')
def info(*args):
from legacypipe.utils import log_info
log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
class SimpleCoadd(object):
'''A class for handling coadds of unWISE (and GALEX) images.
'''
def __init__(self, ra, dec, W, H, pixscale, bands):
from legacypipe.survey import wcs_for_brick, BrickDuck
self.W = W
self.H = H
self.bands = bands
brick = BrickDuck(ra, dec, 'quack')
self.wcs = wcs_for_brick(brick, W=self.W, H=self.H,
pixscale=pixscale)
# images
self.co_images = dict([(band, np.zeros((self.H,self.W), np.float32))
for band in bands])
self.co_nobs = dict([(band, np.zeros((self.H,self.W), np.uint16))
for band in bands])
# models
self.co_models = dict([(band, np.zeros((self.H,self.W), np.float32))
for band in bands])
# invvars
self.co_invvars = dict([(band, np.zeros((self.H,self.W), np.float32))
for band in bands])
def add(self, models, unique=False):
for name, band, wcs, img, mod, ie in models:
debug('Accumulating tile', name, 'band', band)
try:
Yo,Xo,Yi,Xi,resam = resample_with_wcs(self.wcs, wcs,
[img, mod], intType=np.int16)
except OverlapError:
debug('No overlap between tile', name, 'and coadd')
continue
rimg,rmod = resam
debug('Adding', len(Yo), 'pixels from tile', name, 'to coadd')
iv = ie[Yi,Xi]**2
if unique:
K = np.flatnonzero((self.co_nobs[band][Yo,Xo] == 0) * (iv>0))
iv = iv[K]
rimg = rimg[K]
rmod = rmod[K]
Yo = Yo[K]
Xo = Xo[K]
debug('Cut to', len(Yo), 'unique pixels w/ iv>0')
debug('Tile:', np.sum(iv>0), 'of', len(iv), 'pixels have IV')
self.co_images [band][Yo,Xo] += rimg * iv
self.co_models [band][Yo,Xo] += rmod * iv
self.co_nobs [band][Yo,Xo] += 1
self.co_invvars[band][Yo,Xo] += iv
debug('Band', band, ': now', np.sum(self.co_nobs[band]>0), 'pixels are set in image coadd')
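    # A note on the accumulation above (added for clarity): each pixel is
    # weighted by its inverse variance (iv = ie**2, where ie is the
    # inverse-error map), so finish() recovers the coadded image per band as
    # sum(img * iv) / sum(iv).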
def finish(self, survey, brickname, version_header,
apradec=None, apertures=None):
# apradec = (ra,dec): aperture photometry locations
# apertures: RADII in PIXELS
if apradec is not None:
assert(apertures is not None)
(ra,dec) = apradec
ok,xx,yy = self.wcs.radec2pixelxy(ra, dec)
assert(np.all(ok))
del ok
apxy = np.vstack((xx - 1., yy - 1.)).T
ap_iphots = [np.zeros((len(ra), len(apertures)), np.float32)
for band in self.bands]
ap_dphots = [np.zeros((len(ra), len(apertures)), np.float32)
for band in self.bands]
ap_rphots = [np.zeros((len(ra), len(apertures)), np.float32)
for band in self.bands]
coimgs = []
comods = []
for iband,band in enumerate(self.bands):
coimg = self.co_images[band]
comod = self.co_models[band]
coiv = self.co_invvars[band]
con = self.co_nobs[band]
with np.errstate(divide='ignore', invalid='ignore'):
coimg /= coiv
comod /= coiv
coimg[coiv == 0] = 0.
comod[coiv == 0] = 0.
coimgs.append(coimg)
comods.append(comod)
hdr = copy_header_with_wcs(version_header, self.wcs)
self.add_to_header(hdr, band)
self.write_coadds(survey, brickname, hdr, band, coimg, comod, coiv, con)
if apradec is not None:
import photutils
mask = (coiv == 0)
with np.errstate(divide='ignore'):
imsigma = 1.0/np.sqrt(coiv)
imsigma[mask] = 0.
for irad,rad in enumerate(apertures):
aper = photutils.CircularAperture(apxy, rad)
p = photutils.aperture_photometry(coimg, aper, error=imsigma,
mask=mask)
ap_iphots[iband][:,irad] = p.field('aperture_sum')
ap_dphots[iband][:,irad] = p.field('aperture_sum_err')
p = photutils.aperture_photometry(coimg - comod, aper, mask=mask)
ap_rphots[iband][:,irad] = p.field('aperture_sum')
self.write_color_image(survey, brickname, coimgs, comods)
if apradec is not None:
return ap_iphots, ap_dphots, ap_rphots
def add_to_header(self, hdr, band):
pass
def write_coadds(self, survey, brickname, hdr, band, coimg, comod, coiv, con):
pass
def write_color_image(self, survey, brickname, coimgs, comods):
pass
class UnwiseCoadd(SimpleCoadd):
def __init__(self, ra, dec, W, H, pixscale):
super().__init__(ra, dec, W, H, pixscale, [1,2,3,4])
def add_to_header(self, hdr, band):
hdr.add_record(dict(name='TELESCOP', value='WISE'))
hdr.add_record(dict(name='FILTER', value='W%i' % band, comment='WISE band'))
hdr.add_record(dict(name='MAGZERO', value=22.5,
comment='Magnitude zeropoint'))
hdr.add_record(dict(name='MAGSYS', value='Vega',
comment='This WISE image is in Vega fluxes'))
def write_coadds(self, survey, brickname, hdr, band, coimg, comod, coiv, con):
with survey.write_output('image', brick=brickname, band='W%i'%band,
shape=coimg.shape) as out:
out.fits.write(coimg, header=hdr)
with survey.write_output('model', brick=brickname, band='W%i'%band,
shape=comod.shape) as out:
out.fits.write(comod, header=hdr)
with survey.write_output('invvar', brick=brickname, band='W%i'%band,
shape=coiv.shape) as out:
out.fits.write(coiv, header=hdr)
def write_color_image(self, survey, brickname, coimgs, comods):
from legacypipe.survey import imsave_jpeg
# W1/W2 color jpeg
rgb = _unwise_to_rgb(coimgs[:2])
with survey.write_output('wise-jpeg', brick=brickname) as out:
imsave_jpeg(out.fn, rgb, origin='lower')
info('Wrote', out.fn)
rgb = _unwise_to_rgb(comods[:2])
with survey.write_output('wisemodel-jpeg', brick=brickname) as out:
imsave_jpeg(out.fn, rgb, origin='lower')
info('Wrote', out.fn)
coresids = [coimg - comod for coimg, comod in zip(coimgs[:2], comods[:2])]
rgb = _unwise_to_rgb(coresids)
with survey.write_output('wiseresid-jpeg', brick=brickname) as out:
imsave_jpeg(out.fn, rgb, origin='lower')
info('Wrote', out.fn)
def _unwise_to_rgb(imgs):
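    """
    Build a false-color RGB composite from W1/W2 coadd images: W1 maps to
    blue, W2 to red, and their average to green, using an arcsinh stretch.
    """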
img = imgs[0]
H,W = img.shape
## FIXME
w1,w2 = imgs
rgb = np.zeros((H, W, 3), np.uint8)
scale1 = 50.
scale2 = 50.
mn,mx = -1.,100.
arcsinh = 1.
img1 = w1 / scale1
img2 = w2 / scale2
if arcsinh is not None:
def nlmap(x):
return np.arcsinh(x * arcsinh) / np.sqrt(arcsinh)
mean = (img1 + img2) / 2.
I = nlmap(mean)
with np.errstate(divide='ignore'):
img1 = I * img1 / mean
img2 = I * img2 / mean
img1[mean == 0] = 0.
img2[mean == 0] = 0.
mn = nlmap(mn)
mx = nlmap(mx)
img1 = (img1 - mn) / (mx - mn)
img2 = (img2 - mn) / (mx - mn)
rgb[:,:,2] = (np.clip(img1, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,0] = (np.clip(img2, 0., 1.) * 255).astype(np.uint8)
rgb[:,:,1] = rgb[:,:,0]/2 + rgb[:,:,2]/2
return rgb
def make_coadds(tims, bands, targetwcs,
coweights=True,
mods=None, blobmods=None,
xy=None, apertures=None, apxy=None,
ngood=False, detmaps=False, psfsize=False,
allmasks=True, anymasks=False,
get_max=False, sbscale=True,
psf_images=False, nsatur=None,
callback=None, callback_args=None,
plots=False, ps=None,
lanczos=True, mp=None,
satur_val=10.):
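    """
    Produce per-band coadds of the tractor images *tims* on the *targetwcs*
    grid: inverse-variance weighted image coadds and, depending on the
    keyword flags, model/residual coadds, detection-depth maps, per-source
    samples at *xy*, aperture photometry, mask summaries, PSF-size maps and
    PSF stamps.  Returns a simple results object C whose attributes (coimgs,
    cowimgs, comods, coresids, psfdetivs, galdetivs, allmasks, anymasks,
    maximgs, psf_imgs, satmaps, T, AP) are filled in as requested.
    Example (sketch):
        C = make_coadds(tims, bands, targetwcs, mods=mods, xy=(ix, iy))
        coimg_g = C.coimgs[bands.index('g')]
    """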
from astrometry.util.ttime import Time
t0 = Time()
if callback_args is None:
callback_args = []
class Duck(object):
pass
C = Duck()
W = int(targetwcs.get_width())
H = int(targetwcs.get_height())
# always, for patching SATUR, etc pixels?
unweighted=True
C.coimgs = []
if coweights:
# the pixelwise inverse-variances (weights) of the "coimgs".
C.cowimgs = []
if detmaps:
C.galdetivs = []
C.psfdetivs = []
if mods is not None:
C.comods = []
C.coresids = []
if blobmods is not None:
C.coblobmods = []
C.coblobresids = []
if apertures is not None:
C.AP = fits_table()
if allmasks:
C.allmasks = []
if anymasks:
C.anymasks = []
    if get_max:
C.maximgs = []
if psf_images:
C.psf_imgs = []
if nsatur:
C.satmaps = []
if xy:
ix,iy = xy
C.T = fits_table()
if ngood:
C.T.ngood = np.zeros((len(ix), len(bands)), np.int16)
C.T.nobs = np.zeros((len(ix), len(bands)), np.int16)
C.T.anymask = np.zeros((len(ix), len(bands)), np.int16)
C.T.allmask = np.zeros((len(ix), len(bands)), np.int16)
if psfsize:
C.T.psfsize = np.zeros((len(ix), len(bands)), np.float32)
if detmaps:
C.T.psfdepth = np.zeros((len(ix), len(bands)), np.float32)
C.T.galdepth = np.zeros((len(ix), len(bands)), np.float32)
if lanczos:
debug('Doing Lanczos resampling')
for tim in tims:
# surface-brightness correction
tim.sbscale = (targetwcs.pixel_scale() / tim.subwcs.pixel_scale())**2
# We create one iterator per band to do the tim resampling. These all run in
# parallel when multi-processing.
imaps = []
for band in bands:
args = []
for itim,tim in enumerate(tims):
if tim.band != band:
continue
if mods is None:
mo = None
else:
mo = mods[itim]
if blobmods is None:
bmo = None
else:
bmo = blobmods[itim]
args.append((itim,tim,mo,bmo,lanczos,targetwcs,sbscale))
if mp is not None:
imaps.append(mp.imap_unordered(_resample_one, args))
else:
imaps.append(map(_resample_one, args))
# Args for aperture photometry
apargs = []
if xy:
        # To save the memory of two float64 MJD maps, we instead keep int16
        # argmin/argmax index maps; append a 0 to the list of mjds so that
        # mjds[-1] gives 0 for pixels with no observations.
mjds = np.array([tim.time.toMjd() for tim in tims] + [0])
mjd_argmins = np.empty((H,W), np.int16)
mjd_argmaxs = np.empty((H,W), np.int16)
mjd_argmins[:,:] = -1
mjd_argmaxs[:,:] = -1
if plots:
allresids = []
tinyw = 1e-30
for iband,(band,timiter) in enumerate(zip(bands, imaps)):
debug('Computing coadd for band', band)
# coadded weight map (moo)
cow = np.zeros((H,W), np.float32)
# coadded weighted image map
cowimg = np.zeros((H,W), np.float32)
kwargs = dict(cowimg=cowimg, cow=cow)
if detmaps:
# detection map inverse-variance (depth map)
psfdetiv = np.zeros((H,W), np.float32)
C.psfdetivs.append(psfdetiv)
kwargs.update(psfdetiv=psfdetiv)
# galaxy detection map inverse-variance (galdepth map)
galdetiv = np.zeros((H,W), np.float32)
C.galdetivs.append(galdetiv)
kwargs.update(galdetiv=galdetiv)
if mods is not None:
# model image
cowmod = np.zeros((H,W), np.float32)
# chi-squared image
cochi2 = np.zeros((H,W), np.float32)
kwargs.update(cowmod=cowmod, cochi2=cochi2)
if blobmods is not None:
# model image
cowblobmod = np.zeros((H,W), np.float32)
kwargs.update(cowblobmod=cowblobmod)
if unweighted:
# unweighted image
coimg = np.zeros((H,W), np.float32)
if mods is not None:
# unweighted model
comod = np.zeros((H,W), np.float32)
if blobmods is not None:
coblobmod = np.zeros((H,W), np.float32)
# number of exposures
con = np.zeros((H,W), np.int16)
kwargs.update(coimg=coimg)
# We have *three* counters for the number of pixels
# overlapping each coadd brick pixel:
#
# - "con" counts the pixels included in the unweighted coadds.
# This map is not passed outside this function or used
# anywhere else.
#
# - "congood" counts pixels with (iv > 0). This gets passed
# to the *write_coadd_images* function, where it gets
# written to the *nexp* maps.
#
# - "nobs" counts all pixels, regardless of masks. This gets
# sampled at *xy* positions, and ends up in the tractor
# catalog "nobs" column.
#
# (you want to know the number of observations within the
# source footprint, not just the peak pixel which may be
# saturated, etc.)
if ngood:
congood = np.zeros((H,W), np.int16)
kwargs.update(congood=congood)
if xy or allmasks or anymasks:
# These match the type of the "DQ" images.
# "any" mask
ormask = np.zeros((H,W), np.int16)
# "all" mask
andmask = np.empty((H,W), np.int16)
from functools import reduce
allbits = reduce(np.bitwise_or, DQ_BITS.values())
andmask[:,:] = allbits
kwargs.update(ormask=ormask, andmask=andmask)
if xy or allmasks:
# number of observations
nobs = np.zeros((H,W), np.int16)
if nsatur:
satmap = np.zeros((H,W), np.int16)
if psfsize:
psfsizemap = np.zeros((H,W), np.float32)
# like "cow", but constant invvar per-CCD;
# only required for psfsizemap
flatcow = np.zeros((H,W), np.float32)
kwargs.update(psfsize=psfsizemap)
        if get_max:
maximg = np.zeros((H,W), np.float32)
C.maximgs.append(maximg)
if psf_images:
psf_img = 0.
for R in timiter:
if R is None:
continue
itim,Yo,Xo,iv,im,mo,bmo,dq = R
tim = tims[itim]
if plots:
_make_coadds_plots_1(im, band, mods, mo, iv, unweighted,
dq, satur_val, allresids, ps, H, W,
tim, Yo, Xo)
# invvar-weighted image
cowimg[Yo,Xo] += iv * im
cow [Yo,Xo] += iv
goodpix = None
if unweighted:
if dq is None:
goodpix = 1
else:
                    # include SATUR pixels if no other pixels exist
okbits = 0
for bitname in ['satur']:
okbits |= DQ_BITS[bitname]
brightpix = ((dq & okbits) != 0)
if satur_val is not None:
# HACK -- force SATUR pix to be bright
im[brightpix] = satur_val
# Include these pixels if none other exist??
for bitname in ['interp']:
okbits |= DQ_BITS[bitname]
goodpix = ((dq & ~okbits) == 0)
coimg[Yo,Xo] += goodpix * im
con [Yo,Xo] += goodpix
if xy or allmasks or anymasks:
if dq is not None:
ormask [Yo,Xo] |= dq
andmask[Yo,Xo] &= dq
if xy or allmasks:
# raw exposure count
nobs[Yo,Xo] += 1
if xy:
# mjd_min/max
update = np.logical_or(mjd_argmins[Yo,Xo] == -1,
(mjd_argmins[Yo,Xo] > -1) *
(mjds[itim] < mjds[mjd_argmins[Yo,Xo]]))
mjd_argmins[Yo[update],Xo[update]] = itim
update = np.logical_or(mjd_argmaxs[Yo,Xo] == -1,
(mjd_argmaxs[Yo,Xo] > -1) *
(mjds[itim] > mjds[mjd_argmaxs[Yo,Xo]]))
mjd_argmaxs[Yo[update],Xo[update]] = itim
del update
if nsatur and dq is not None:
satmap[Yo,Xo] += (1*((dq & DQ_BITS['satur'])>0))
if psfsize:
# psfnorm is in units of 1/pixels.
# (eg, psfnorm for a gaussian is 1./(2.*sqrt(pi) * psf_sigma) )
# Neff is in pixels**2
neff = 1./tim.psfnorm**2
# Narcsec is in arcsec**2
narcsec = neff * tim.wcs.pixel_scale()**2
# Make smooth maps -- don't ignore CRs, saturated pix, etc
iv1 = 1./tim.sig1**2
psfsizemap[Yo,Xo] += iv1 * (1. / narcsec)
flatcow [Yo,Xo] += iv1
if psf_images:
from astrometry.util.util import lanczos3_interpolate
h,w = tim.shape
patch = tim.psf.getPointSourcePatch(w//2, h//2).patch
patch /= np.sum(patch)
# In case the tim and coadd have different pixel scales,
# resample the PSF stamp.
ph,pw = patch.shape
pscale = tim.imobj.pixscale / targetwcs.pixel_scale()
coph = int(np.ceil(ph * pscale))
copw = int(np.ceil(pw * pscale))
coph = 2 * (coph//2) + 1
copw = 2 * (copw//2) + 1
# want input image pixel coords that change by 1/pscale
# and are centered on pw//2, ph//2
cox = np.arange(copw) * 1./pscale
cox += pw//2 - cox[copw//2]
coy = np.arange(coph) * 1./pscale
coy += ph//2 - coy[coph//2]
fx,fy = np.meshgrid(cox,coy)
fx = fx.ravel()
fy = fy.ravel()
ix = (fx + 0.5).astype(np.int32)
iy = (fy + 0.5).astype(np.int32)
dx = (fx - ix).astype(np.float32)
dy = (fy - iy).astype(np.float32)
copsf = np.zeros(coph*copw, np.float32)
lanczos3_interpolate(ix, iy, dx, dy, [copsf], [patch])
copsf = copsf.reshape((coph,copw))
copsf /= copsf.sum()
if plots:
_make_coadds_plots_2(patch, copsf, psf_img, tim, band, ps)
psf_img += copsf / tim.sig1**2
if detmaps:
# point-source depth
detsig1 = tim.sig1 / tim.psfnorm
psfdetiv[Yo,Xo] += (iv > 0) * (1. / detsig1**2)
# Galaxy detection map
gdetsig1 = tim.sig1 / tim.galnorm
galdetiv[Yo,Xo] += (iv > 0) * (1. / gdetsig1**2)
if ngood:
congood[Yo,Xo] += (iv > 0)
if mods is not None:
# straight-up
comod[Yo,Xo] += goodpix * mo
# invvar-weighted
cowmod[Yo,Xo] += iv * mo
# chi-squared
cochi2[Yo,Xo] += iv * (im - mo)**2
del mo
if blobmods is not None:
# straight-up
coblobmod[Yo,Xo] += goodpix * bmo
# invvar-weighted
cowblobmod[Yo,Xo] += iv * bmo
del bmo
del goodpix
            if get_max:
maximg[Yo,Xo] = np.maximum(maximg[Yo,Xo], im * (iv>0))
del Yo,Xo,im,iv
# END of loop over tims
# Per-band:
cowimg /= np.maximum(cow, tinyw)
C.coimgs.append(cowimg)
if coweights:
C.cowimgs.append(cow)
if mods is not None:
cowmod /= np.maximum(cow, tinyw)
C.comods.append(cowmod)
coresid = cowimg - cowmod
coresid[cow == 0] = 0.
C.coresids.append(coresid)
if blobmods is not None:
cowblobmod /= np.maximum(cow, tinyw)
C.coblobmods.append(cowblobmod)
coblobresid = cowimg - cowblobmod
coblobresid[cow == 0] = 0.
C.coblobresids.append(coblobresid)
if xy or allmasks:
            # If there was no coverage, don't set ALLMASK
andmask[nobs == 0] = 0
if allmasks:
C.allmasks.append(andmask)
if anymasks:
C.anymasks.append(ormask)
if nsatur:
C.satmaps.append(satmap >= nsatur)
if psf_images:
C.psf_imgs.append(psf_img / np.sum(psf_img))
if unweighted:
coimg /= np.maximum(con, 1)
del con
if plots:
_make_coadds_plots_3(cowimg, cow, coimg, band, ps)
# Patch pixels with no data in the weighted coadd.
cowimg[cow == 0] = coimg[cow == 0]
del coimg
if mods is not None:
cowmod[cow == 0] = comod[cow == 0]
del comod
if blobmods is not None:
cowblobmod[cow == 0] = coblobmod[cow == 0]
del coblobmod
if xy:
if ngood:
C.T.ngood [:,iband] = congood[iy,ix]
C.T.nobs [:,iband] = nobs [iy,ix]
C.T.anymask[:,iband] = ormask [iy,ix]
C.T.allmask[:,iband] = andmask[iy,ix]
if detmaps:
C.T.psfdepth[:,iband] = psfdetiv[iy, ix]
C.T.galdepth[:,iband] = galdetiv[iy, ix]
if psfsize:
# psfsizemap is accumulated in units of iv * (1 / arcsec**2)
# take out the weighting
psfsizemap /= np.maximum(flatcow, tinyw)
# Correction factor to get back to equivalent of Gaussian sigma
tosigma = 1./(2. * np.sqrt(np.pi))
# Conversion factor to FWHM (2.35)
tofwhm = 2. * np.sqrt(2. * np.log(2.))
# Scale back to units of linear arcsec.
with np.errstate(divide='ignore'):
psfsizemap[:,:] = (1. / np.sqrt(psfsizemap)) * tosigma * tofwhm
psfsizemap[flatcow == 0] = 0.
if xy:
C.T.psfsize[:,iband] = psfsizemap[iy,ix]
if apertures is not None:
# Aperture photometry
# photutils.aperture_photometry: mask=True means IGNORE
mask = (cow == 0)
with np.errstate(divide='ignore'):
imsigma = 1.0/np.sqrt(cow)
imsigma[mask] = 0.
for irad,rad in enumerate(apertures):
apargs.append((irad, band, rad, cowimg, imsigma, mask,
True, apxy))
if mods is not None:
apargs.append((irad, band, rad, coresid, None, None,
False, apxy))
if blobmods is not None:
apargs.append((irad, band, rad, coblobresid, None, None,
False, apxy))
if not coweights:
del cow
if callback is not None:
callback(band, *callback_args, **kwargs)
# END of loop over bands
t2 = Time()
debug('coadds: images:', t2-t0)
if plots:
_make_coadds_plots_4(allresids, mods, ps)
if xy is not None:
C.T.mjd_min = mjds[mjd_argmins[iy,ix]]
C.T.mjd_max = mjds[mjd_argmaxs[iy,ix]]
del mjd_argmins
del mjd_argmaxs
if apertures is not None:
# Aperture phot, in parallel
if mp is not None:
apresults = mp.map(_apphot_one, apargs)
else:
apresults = map(_apphot_one, apargs)
del apargs
apresults = iter(apresults)
for iband,band in enumerate(bands):
apimg = []
apimgerr = []
apmask = []
if mods is not None:
apres = []
if blobmods is not None:
apblobres = []
for irad,rad in enumerate(apertures):
(airad, aband, isimg, ap_img, ap_err, ap_mask) = next(apresults)
assert(airad == irad)
assert(aband == band)
assert(isimg)
apimg.append(ap_img)
apimgerr.append(ap_err)
apmask.append(ap_mask)
if mods is not None:
(airad, aband, isimg, ap_img, ap_err, ap_mask) = next(apresults)
assert(airad == irad)
assert(aband == band)
assert(not isimg)
apres.append(ap_img)
assert(ap_err is None)
assert(ap_mask is None)
if blobmods is not None:
(airad, aband, isimg, ap_img, ap_err, ap_mask) = next(apresults)
assert(airad == irad)
assert(aband == band)
assert(not isimg)
apblobres.append(ap_img)
assert(ap_err is None)
assert(ap_mask is None)
ap = np.vstack(apimg).T
ap[np.logical_not(np.isfinite(ap))] = 0.
C.AP.set('apflux_img_%s' % band, ap)
with np.errstate(divide='ignore'):
ap = 1./(np.vstack(apimgerr).T)**2
ap[np.logical_not(np.isfinite(ap))] = 0.
C.AP.set('apflux_img_ivar_%s' % band, ap)
ap = np.vstack(apmask).T
ap[np.logical_not(np.isfinite(ap))] = 0.
C.AP.set('apflux_masked_%s' % band, ap)
if mods is not None:
ap = np.vstack(apres).T
ap[np.logical_not(np.isfinite(ap))] = 0.
C.AP.set('apflux_resid_%s' % band, ap)
if blobmods is not None:
ap = np.vstack(apblobres).T
ap[np.logical_not(np.isfinite(ap))] = 0.
C.AP.set('apflux_blobresid_%s' % band, ap)
t3 = Time()
debug('coadds apphot:', t3-t2)
return C
def _make_coadds_plots_4(allresids, mods, ps):
import pylab as plt
I = np.argsort([a[0] for a in allresids])
cols = int(np.ceil(np.sqrt(len(I))))
rows = int(np.ceil(len(I) / float(cols)))
allresids = [allresids[i] for i in I]
plt.clf()
for i,(y,n,img,mod,res) in enumerate(allresids):
plt.subplot(rows,cols,i+1)
plt.imshow(img, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.title('%.1f: %s' % (y, n))
plt.suptitle('Data')
ps.savefig()
if mods is not None:
plt.clf()
for i,(y,n,img,mod,res) in enumerate(allresids):
plt.subplot(rows,cols,i+1)
plt.imshow(mod, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.title('%.1f: %s' % (y, n))
plt.suptitle('Model')
ps.savefig()
plt.clf()
for i,(y,n,img,mod,res) in enumerate(allresids):
plt.subplot(rows,cols,i+1)
plt.imshow(res, interpolation='nearest', origin='lower', cmap='gray',
vmin=-20, vmax=20)
plt.xticks([]); plt.yticks([])
plt.title('%.1f: %s' % (y, n))
plt.suptitle('Resids')
ps.savefig()
def _make_coadds_plots_3(cowimg, cow, coimg, band, ps):
import pylab as plt
plt.clf()
plt.subplot(2,2,1)
mn,mx = cowimg.min(), cowimg.max()
plt.imshow(cowimg, interpolation='nearest', origin='lower', cmap='gray',
vmin=mn, vmax=mx)
plt.xticks([]); plt.yticks([])
plt.title('weighted img')
plt.subplot(2,2,2)
mycow = cow.copy()
# mark zero as special color
#mycow[mycow == 0] = np.nan
plt.imshow(mycow, interpolation='nearest', origin='lower', cmap='gray',
vmin=0)
plt.xticks([]); plt.yticks([])
plt.title('weights')
plt.subplot(2,2,3)
plt.imshow(coimg, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.title('unweighted img')
mycowimg = cowimg.copy()
mycowimg[cow == 0] = coimg[cow == 0]
plt.subplot(2,2,4)
plt.imshow(mycowimg, interpolation='nearest', origin='lower',
cmap='gray', vmin=mn, vmax=mx)
plt.xticks([]); plt.yticks([])
plt.title('patched img')
plt.suptitle('band %s' % band)
ps.savefig()
def _make_coadds_plots_2(patch, copsf, psf_img, tim, band, ps):
import pylab as plt
plt.clf()
plt.subplot(2,2,1)
plt.imshow(patch, interpolation='nearest', origin='lower')
plt.title('PSF')
plt.subplot(2,2,2)
plt.imshow(copsf, interpolation='nearest', origin='lower')
plt.title('resampled PSF')
plt.subplot(2,2,3)
plt.imshow(np.atleast_2d(psf_img), interpolation='nearest', origin='lower')
plt.title('PSF acc')
plt.subplot(2,2,4)
plt.imshow(psf_img + copsf/tim.sig1**2, interpolation='nearest', origin='lower')
plt.title('PSF acc after')
plt.suptitle('Tim %s band %s' % (tim.name, band))
ps.savefig()
def _make_coadds_plots_1(im, band, mods, mo, iv, unweighted,
dq, satur_val, allresids, ps, H, W,
tim, Yo, Xo):
from legacypipe.survey import get_rgb
import pylab as plt
# # Make one grayscale, brick-space plot per image
# thisimg = np.zeros((H,W), np.float32)
# thisimg[Yo,Xo] = im
# rgb = get_rgb([thisimg], [band])
# rgb = rgb.sum(axis=2)
# fn = ps.getnext()
# plt.imsave(fn, rgb, origin='lower', cmap='gray')
#plt.clf()
#plt.imshow(rgb, interpolation='nearest', origin='lower', cmap='gray')
#plt.xticks([]); plt.yticks([])
#ps.savefig()
# Image, Model, and Resids
plt.clf()
plt.subplot(2,2,1)
thisimg = np.zeros((H,W), np.float32)
thisimg[Yo,Xo] = im
rgb = get_rgb([thisimg], [band])
iplane = dict(g=2, r=1, i=0, z=0)[band]
rgbimg = rgb[:,:,iplane]
plt.imshow(rgbimg, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
if mods is not None:
plt.subplot(2,2,2)
thismod = np.zeros((H,W), np.float32)
thismod[Yo,Xo] = mo
rgb = get_rgb([thismod], [band])
rgbmod = rgb[:,:,iplane]
plt.imshow(rgbmod, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.subplot(2,2,3)
thisres = np.zeros((H,W), np.float32)
thisres[Yo,Xo] = (im - mo) * np.sqrt(iv)
plt.imshow(thisres, interpolation='nearest', origin='lower', cmap='gray',
vmin=-20, vmax=20)
plt.xticks([]); plt.yticks([])
else:
if unweighted and (dq is not None):
# HACK -- copy-n-pasted code from below.
okbits = 0
#for bitname in ['satur', 'bleed']:
for bitname in ['satur']:
okbits |= DQ_BITS[bitname]
brightpix = ((dq & okbits) != 0)
myim = im.copy()
if satur_val is not None:
# HACK -- force SATUR pix to be bright
myim[brightpix] = satur_val
#for bitname in ['interp']:
for bitname in ['interp', 'bleed']:
okbits |= DQ_BITS[bitname]
goodpix = ((dq & ~okbits) == 0)
thisgood = np.zeros((H,W), np.float32)
thisgood[Yo,Xo] = goodpix
plt.subplot(2,2,2)
plt.imshow(thisgood, interpolation='nearest', origin='lower', cmap='gray', vmin=0, vmax=1)
plt.xticks([]); plt.yticks([])
plt.title('goodpix')
thisim = np.zeros((H,W), np.float32)
thisim[Yo,Xo] = goodpix * myim
rgb = get_rgb([thisim], [band])
iplane = dict(g=2, r=1, i=1, z=0)[band]
rgbimg = rgb[:,:,iplane]
plt.subplot(2,2,3)
plt.imshow(rgbimg, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.title('goodpix rgb')
rgbmod=None
thisres=None
plt.subplot(2,2,4)
thisiv = np.zeros((H,W), np.float32)
thisiv[Yo,Xo] = iv
plt.imshow(thisiv, interpolation='nearest', origin='lower', cmap='gray')
plt.xticks([]); plt.yticks([])
plt.title('invvar')
plt.suptitle(tim.name + ': %.2f' % (tim.time.toYear()))
ps.savefig()
allresids.append((tim.time.toYear(), tim.name, rgbimg,rgbmod,thisres))
def _resample_one(args):
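    """
    Resample one tractor image (tim), and optionally its model and blob-model
    images, onto *targetwcs* -- with Lanczos-3 interpolation (after patching
    masked pixels) if *lanczos*, otherwise by direct pixel lookup -- applying
    the surface-brightness scaling if *sbscale*.  Returns
    (itim, Yo, Xo, iv, im, mo, bmo, dq), or None if there is no overlap.
    """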
(itim,tim,mod,blobmod,lanczos,targetwcs,sbscale) = args
if lanczos:
from astrometry.util.miscutils import patch_image
patched = tim.getImage().copy()
assert(np.all(np.isfinite(tim.getInvError())))
okpix = (tim.getInvError() > 0)
patch_image(patched, okpix)
del okpix
imgs = [patched]
if mod is not None:
imgs.append(mod)
if blobmod is not None:
imgs.append(blobmod)
else:
imgs = []
try:
Yo,Xo,Yi,Xi,rimgs = resample_with_wcs(
targetwcs, tim.subwcs, imgs, 3, intType=np.int16)
except OverlapError:
return None
if len(Yo) == 0:
return None
mo = None
bmo = None
if lanczos:
im = rimgs[0]
inext = 1
if mod is not None:
mo = rimgs[inext]
inext += 1
if blobmod is not None:
bmo = rimgs[inext]
inext += 1
del patched,imgs,rimgs
else:
im = tim.getImage ()[Yi,Xi]
if mod is not None:
mo = mod[Yi,Xi]
if blobmod is not None:
bmo = blobmod[Yi,Xi]
iv = tim.getInvvar()[Yi,Xi]
if sbscale:
fscale = tim.sbscale
debug('Applying surface-brightness scaling of %.3f to' % fscale, tim.name)
im *= fscale
iv /= (fscale**2)
if mod is not None:
mo *= fscale
if blobmod is not None:
bmo *= fscale
if tim.dq is None:
dq = None
else:
dq = tim.dq[Yi,Xi]
return itim,Yo,Xo,iv,im,mo,bmo,dq
def _apphot_one(args):
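    """
    Measure aperture photometry for a single (radius, band, image) with
    photutils.  Returns [irad, band, isimage, aperture flux, aperture flux
    error or None, fraction of the aperture falling on masked pixels or None].
    """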
(irad, band, rad, img, sigma, mask, isimage, apxy) = args
import photutils
result = [irad, band, isimage]
aper = photutils.CircularAperture(apxy, rad)
p = photutils.aperture_photometry(img, aper, error=sigma, mask=mask)
result.append(p.field('aperture_sum'))
if sigma is not None:
result.append(p.field('aperture_sum_err'))
else:
result.append(None)
# If a mask is passed, also photometer it!
if mask is not None:
p = photutils.aperture_photometry(mask, aper)
maskedpix = p.field('aperture_sum')
# normalize by number of pixels (pi * rad**2)
maskedpix /= (np.pi * rad**2)
result.append(maskedpix)
else:
result.append(None)
return result
def get_coadd_headers(hdr, tims, band, coadd_headers=None):
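    """
    Add coadd metadata to the FITS header *hdr*: observatory/instrument
    keywords gathered from the input tims in this band, the filter name,
    MJD min/max/mean (TAI) with the corresponding UTC date strings, plus any
    extra records passed in *coadd_headers*.
    """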
if coadd_headers is None:
coadd_headers = {}
# Grab these keywords from all input files for this band...
keys = ['OBSERVAT', 'TELESCOP','OBS-LAT','OBS-LONG','OBS-ELEV',
'INSTRUME','FILTER']
comms = ['Observatory name', 'Telescope name', 'Latitude (deg)', 'Longitude (deg)',
'Elevation (m)', 'Instrument name', 'Filter name']
vals = set()
for tim in tims:
if tim.band != band:
continue
v = []
for key in keys:
v.append(tim.primhdr.get(key,''))
vals.add(tuple(v))
for i,v in enumerate(vals):
for ik,key in enumerate(keys):
if i == 0:
kk = key
else:
kk = key[:7] + '%i'%i
hdr.add_record(dict(name=kk, value=v[ik],comment=comms[ik]))
hdr.add_record(dict(name='FILTERX', value=band, comment='Filter short name'))
# DATE-OBS converted to TAI.
mjds = [tim.time.toMjd() for tim in tims if tim.band == band]
minmjd = min(mjds)
maxmjd = max(mjds)
meanmjd = np.mean(mjds)
hdr.add_record(dict(name='MJD_MIN', value=minmjd,
comment='Earliest MJD in coadd (TAI)'))
hdr.add_record(dict(name='MJD_MAX', value=maxmjd,
comment='Latest MJD in coadd (TAI)'))
hdr.add_record(dict(name='MJD_MEAN', value=meanmjd,
comment='Mean MJD in coadd (TAI)'))
# back to date string in UTC...
import astropy.time
tt = [astropy.time.Time(mjd, format='mjd', scale='tai').utc.isot
for mjd in [minmjd, maxmjd, meanmjd]]
hdr.add_record(dict(
name='DATEOBS1', value=tt[0],
comment='DATE-OBS for the first image in the stack (UTC)'))
hdr.add_record(dict(
name='DATEOBS2', value=tt[1],
comment='DATE-OBS for the last image in the stack (UTC)'))
hdr.add_record(dict(
name='DATEOBS', value=tt[2],
comment='Mean DATE-OBS for the stack (UTC)'))
# add more info from fit_on_coadds
if bool(coadd_headers):
for key in sorted(coadd_headers.keys()):
hdr.add_record(dict(name=key, value=coadd_headers[key][0], comment=coadd_headers[key][1]))
def write_coadd_images(band,
survey, brickname, version_header, tims, targetwcs,
co_sky,
coadd_headers=None,
cowimg=None, cow=None, cowmod=None, cochi2=None,
cowblobmod=None,
psfdetiv=None, galdetiv=None, congood=None,
psfsize=None, **kwargs):
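    """
    Write the per-band coadd data products (image, invvar, and optionally
    nexp, depth, galdepth, psfsize, model, chi2, blobmodel) as FITS files via
    survey.write_output, each with appropriate IMTYPE/PRODTYPE and unit
    headers.
    """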
hdr = copy_header_with_wcs(version_header, targetwcs)
# Grab headers from input images...
get_coadd_headers(hdr, tims, band, coadd_headers)
imgs = [
('image', 'image', cowimg),
('invvar', 'wtmap', cow ),
]
if congood is not None:
imgs.append(('nexp', 'expmap', congood))
if psfdetiv is not None:
imgs.append(('depth', 'psfdepth', psfdetiv))
if galdetiv is not None:
imgs.append(('galdepth', 'galdepth', galdetiv))
if psfsize is not None:
imgs.append(('psfsize', 'psfsize', psfsize))
if cowmod is not None:
imgs.extend([
('model', 'model', cowmod),
('chi2', 'chi2', cochi2),
])
if cowblobmod is not None:
imgs.append(('blobmodel', 'blobmodel', cowblobmod))
for name,prodtype,img in imgs:
if img is None:
debug('Image type', prodtype, 'is None -- skipping')
continue
# Make a copy, because each image has different values for
# these headers...
hdr2 = fitsio.FITSHDR()
for r in hdr.records():
hdr2.add_record(r)
hdr2.add_record(dict(name='IMTYPE', value=name,
comment='LegacySurveys image type'))
hdr2.add_record(dict(name='PRODTYPE', value=prodtype,
comment='NOAO image type'))
if name in ['image', 'model', 'blobmodel']:
hdr2.add_record(dict(name='MAGZERO', value=22.5,
comment='Magnitude zeropoint'))
hdr2.add_record(dict(name='BUNIT', value='nanomaggy',
comment='AB mag = 22.5 - 2.5*log10(nanomaggy)'))
if name == 'image' and co_sky is not None:
hdr2.add_record(dict(name='COSKY_%s' % band.upper(), value=co_sky.get(band, 'None'),
comment='Sky level estimated (+subtracted) from coadd'))
if name in ['invvar', 'depth', 'galdepth']:
hdr2.add_record(dict(name='BUNIT', value='1/nanomaggy^2',
comment='Ivar of ABmag=22.5-2.5*log10(nmgy)'))
if name in ['psfsize']:
hdr2.add_record(dict(name='BUNIT', value='arcsec',
comment='Effective PSF size'))
with survey.write_output(name, brick=brickname, band=band,
shape=img.shape) as out:
out.fits.write(img, header=hdr2)
# Pretty much only used for plots; the real deal is make_coadds()
def quick_coadds(tims, bands, targetwcs, images=None,
get_cow=False, get_n2=False, fill_holes=True, get_max=False,
get_saturated=False,
addnoise=False):
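    """
    Compute simple unweighted (and optionally inverse-variance weighted,
    maximum, and saturation) coadds per band.  Returns [coimgs, cons] plus,
    in order, [cowimgs, wimgs], cons2, maximgs, and satur for whichever of
    get_cow, get_n2, get_max, get_saturated are set.
    """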
W = int(targetwcs.get_width())
H = int(targetwcs.get_height())
coimgs = []
cons = []
if get_n2:
cons2 = []
if get_cow:
# moo
cowimgs = []
wimgs = []
if get_max:
maximgs = []
if get_saturated:
        satur = np.zeros((H,W), bool)
if addnoise:
noise = np.zeros((H,W), np.float32)
for band in bands:
coimg = np.zeros((H,W), np.float32)
coimg2 = np.zeros((H,W), np.float32)
con = np.zeros((H,W), np.int16)
con2 = np.zeros((H,W), np.int16)
if get_cow:
cowimg = np.zeros((H,W), np.float32)
wimg = np.zeros((H,W), np.float32)
if get_max:
maximg = np.zeros((H,W), np.float32)
for itim,tim in enumerate(tims):
if tim.band != band:
continue
R = tim_get_resamp(tim, targetwcs)
if R is None:
continue
(Yo,Xo,Yi,Xi) = R
nn = (tim.getInvError()[Yi,Xi] > 0)
if images is None:
coimg [Yo,Xo] += tim.getImage()[Yi,Xi] * nn
coimg2[Yo,Xo] += tim.getImage()[Yi,Xi]
if get_max:
maximg[Yo,Xo] = np.maximum(maximg[Yo,Xo], tim.getImage()[Yi,Xi] * nn)
else:
coimg [Yo,Xo] += images[itim][Yi,Xi] * nn
coimg2[Yo,Xo] += images[itim][Yi,Xi]
if get_max:
maximg[Yo,Xo] = np.maximum(maximg[Yo,Xo], images[itim][Yi,Xi] * nn)
if addnoise:
noise[:,:] = 0.
noise[Yo[nn],Xo[nn]] = 1./(tim.getInvError()[Yi,Xi][nn])
coimg += noise * np.random.normal(size=noise.shape)
con [Yo,Xo] += nn
if get_cow:
cowimg[Yo,Xo] += tim.getInvvar()[Yi,Xi] * tim.getImage()[Yi,Xi]
wimg [Yo,Xo] += tim.getInvvar()[Yi,Xi]
if get_saturated and tim.dq is not None:
satur[Yo,Xo] |= ((tim.dq[Yi,Xi] & tim.dq_saturation_bits) > 0)
con2 [Yo,Xo] += 1
coimg /= np.maximum(con,1)
if fill_holes:
coimg[con == 0] = coimg2[con == 0] / np.maximum(1, con2[con == 0])
if get_cow:
cowimg /= np.maximum(wimg, 1e-16)
cowimg[wimg == 0] = coimg[wimg == 0]
cowimgs.append(cowimg)
wimgs.append(wimg)
if get_max:
maximgs.append(maximg)
coimgs.append(coimg)
cons.append(con)
if get_n2:
cons2.append(con2)
rtn = [coimgs,cons]
if get_cow:
rtn.extend([cowimgs, wimgs])
if get_n2:
rtn.append(cons2)
if get_max:
rtn.append(maximgs)
if get_saturated:
rtn.append(satur)
return rtn
#! /usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from errno import EEXIST
from itertools import islice, izip
from operator import itemgetter
from os import mkdir
from os.path import basename, abspath, dirname, exists, join as pathjoin
from sys import argv as sys_argv, exit, stderr, stdout
from textwrap import wrap
from time import time
import optparse
import math
from swift.common import exceptions
from swift.common.ring import RingBuilder, Ring
from swift.common.ring.builder import MAX_BALANCE
from swift.common.ring.utils import validate_args, \
validate_and_normalize_ip, build_dev_from_opts, \
parse_builder_ring_filename_args, parse_search_value, \
parse_search_values_from_opts, parse_change_values_from_opts, \
dispersion_report, validate_device_name
from swift.common.utils import lock_parent_directory
MAJOR_VERSION = 1
MINOR_VERSION = 3
EXIT_SUCCESS = 0
EXIT_WARNING = 1
EXIT_ERROR = 2
global argv, backup_dir, builder, builder_file, ring_file
argv = backup_dir = builder = builder_file = ring_file = None
def format_device(dev):
"""
Format a device for display.
"""
copy_dev = dev.copy()
for key in ('ip', 'replication_ip'):
if ':' in copy_dev[key]:
copy_dev[key] = '[' + copy_dev[key] + ']'
return ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)sR'
'%(replication_ip)s:%(replication_port)s/%(device)s_'
'"%(meta)s"' % copy_dev)
def _parse_search_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
search_values = {}
if len(args) > 0:
if new_cmd_format or len(args) != 1:
print Commands.search.__doc__.strip()
exit(EXIT_ERROR)
search_values = parse_search_value(args[0])
else:
search_values = parse_search_values_from_opts(opts)
return search_values
except ValueError as e:
print e
exit(EXIT_ERROR)
def _find_parts(devs):
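    """
    Return a list of (partition, replica_count) pairs for all partitions that
    have at least one replica on the given devices, sorted by the number of
    matching replicas, most first.  Returns None if no devices are given or
    the builder has not been rebalanced yet.
    """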
devs = [d['id'] for d in devs]
if not devs or not builder._replica2part2dev:
return None
partition_count = {}
for replica in builder._replica2part2dev:
for partition, device in enumerate(replica):
if device in devs:
if partition not in partition_count:
partition_count[partition] = 0
partition_count[partition] += 1
# Sort by number of found replicas to keep the output format
sorted_partition_count = sorted(
partition_count.iteritems(), key=itemgetter(1), reverse=True)
return sorted_partition_count
def _parse_list_parts_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print Commands.list_parts.__doc__.strip()
exit(EXIT_ERROR)
for arg in args:
devs.extend(
builder.search_devs(parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
return devs
except ValueError as e:
print e
exit(EXIT_ERROR)
def _parse_address(rest):
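    """
    Parse an <ip>:<port> prefix (IPv6 addresses may be bracketed) from an
    add/set_info value string.  Returns (normalized_ip, port, remainder),
    where remainder begins at the next 'R' or '/' component (or is empty).
    """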
if rest.startswith('['):
# remove first [] for ip
rest = rest.replace('[', '', 1).replace(']', '', 1)
pos = 0
while (pos < len(rest) and
not (rest[pos] == 'R' or rest[pos] == '/')):
pos += 1
address = rest[:pos]
rest = rest[pos:]
port_start = address.rfind(':')
if port_start == -1:
raise ValueError('Invalid port in add value')
ip = address[:port_start]
try:
port = int(address[(port_start + 1):])
except (TypeError, ValueError):
raise ValueError(
'Invalid port %s in add value' % address[port_start:])
# if this is an ipv6 address then we want to convert it
# to all lowercase and use its fully expanded representation
# to make searches easier
ip = validate_and_normalize_ip(ip)
return (ip, port, rest)
def _parse_add_values(argvish):
"""
Parse devices to add as specified on the command line.
Will exit on error and spew warnings.
:returns: array of device dicts
"""
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
parsed_devs = []
if len(args) > 0:
if new_cmd_format or len(args) % 2 != 0:
print Commands.add.__doc__.strip()
exit(EXIT_ERROR)
devs_and_weights = izip(islice(args, 0, len(args), 2),
islice(args, 1, len(args), 2))
for devstr, weightstr in devs_and_weights:
region = 1
rest = devstr
if devstr.startswith('r'):
i = 1
while i < len(devstr) and devstr[i].isdigit():
i += 1
region = int(devstr[1:i])
rest = devstr[i:]
else:
stderr.write('WARNING: No region specified for %s. '
'Defaulting to region 1.\n' % devstr)
if not rest.startswith('z'):
raise ValueError('Invalid add value: %s' % devstr)
i = 1
while i < len(rest) and rest[i].isdigit():
i += 1
zone = int(rest[1:i])
rest = rest[i:]
if not rest.startswith('-'):
raise ValueError('Invalid add value: %s' % devstr)
ip, port, rest = _parse_address(rest[1:])
replication_ip = ip
replication_port = port
if rest.startswith('R'):
replication_ip, replication_port, rest = \
_parse_address(rest[1:])
if not rest.startswith('/'):
raise ValueError(
'Invalid add value: %s' % devstr)
i = 1
while i < len(rest) and rest[i] != '_':
i += 1
device_name = rest[1:i]
if not validate_device_name(device_name):
raise ValueError('Invalid device name')
rest = rest[i:]
meta = ''
if rest.startswith('_'):
meta = rest[1:]
weight = float(weightstr)
if weight < 0:
raise ValueError('Invalid weight value: %s' % devstr)
parsed_devs.append({'region': region, 'zone': zone, 'ip': ip,
'port': port, 'device': device_name,
'replication_ip': replication_ip,
'replication_port': replication_port,
'weight': weight, 'meta': meta})
else:
parsed_devs.append(build_dev_from_opts(opts))
return parsed_devs
def _set_weight_values(devs, weight):
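    """
    Apply *weight* to every matched device via builder.set_dev_weight,
    prompting for confirmation first when more than one device matched.
    Exits with an error if no devices matched.
    """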
if not devs:
print('Search value matched 0 devices.\n'
'The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
if len(devs) > 1:
print 'Matched more than one device:'
for dev in devs:
print ' %s' % format_device(dev)
if raw_input('Are you sure you want to update the weight for '
'these %s devices? (y/N) ' % len(devs)) != 'y':
print 'Aborting device modifications'
exit(EXIT_ERROR)
for dev in devs:
builder.set_dev_weight(dev['id'], weight)
print '%s weight set to %s' % (format_device(dev),
dev['weight'])
def _parse_set_weight_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if not new_cmd_format:
if len(args) % 2 != 0:
print Commands.set_weight.__doc__.strip()
exit(EXIT_ERROR)
devs_and_weights = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, weightstr in devs_and_weights:
devs.extend(builder.search_devs(
parse_search_value(devstr)) or [])
weight = float(weightstr)
_set_weight_values(devs, weight)
else:
if len(args) != 1:
print Commands.set_weight.__doc__.strip()
exit(EXIT_ERROR)
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)) or [])
weight = float(args[0])
_set_weight_values(devs, weight)
except ValueError as e:
print e
exit(EXIT_ERROR)
def _set_info_values(devs, change):
if not devs:
print("Search value matched 0 devices.\n"
"The on-disk ring builder is unchanged.")
exit(EXIT_ERROR)
if len(devs) > 1:
print 'Matched more than one device:'
for dev in devs:
print ' %s' % format_device(dev)
if raw_input('Are you sure you want to update the info for '
'these %s devices? (y/N) ' % len(devs)) != 'y':
print 'Aborting device modifications'
exit(EXIT_ERROR)
for dev in devs:
orig_dev_string = format_device(dev)
test_dev = dict(dev)
for key in change:
test_dev[key] = change[key]
for check_dev in builder.devs:
if not check_dev or check_dev['id'] == test_dev['id']:
continue
if check_dev['ip'] == test_dev['ip'] and \
check_dev['port'] == test_dev['port'] and \
check_dev['device'] == test_dev['device']:
print 'Device %d already uses %s:%d/%s.' % \
(check_dev['id'], check_dev['ip'],
check_dev['port'], check_dev['device'])
exit(EXIT_ERROR)
for key in change:
dev[key] = change[key]
print 'Device %s is now %s' % (orig_dev_string,
format_device(dev))
def _parse_set_info_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
if not new_cmd_format:
if len(args) % 2 != 0:
print Commands.search.__doc__.strip()
exit(EXIT_ERROR)
searches_and_changes = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for search_value, change_value in searches_and_changes:
devs = builder.search_devs(parse_search_value(search_value))
change = {}
ip = ''
if len(change_value) and change_value[0].isdigit():
i = 1
while (i < len(change_value) and
change_value[i] in '0123456789.'):
i += 1
ip = change_value[:i]
change_value = change_value[i:]
elif len(change_value) and change_value[0] == '[':
i = 1
while i < len(change_value) and change_value[i] != ']':
i += 1
i += 1
ip = change_value[:i].lstrip('[').rstrip(']')
change_value = change_value[i:]
if ip:
change['ip'] = validate_and_normalize_ip(ip)
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
i += 1
change['port'] = int(change_value[1:i])
change_value = change_value[i:]
if change_value.startswith('R'):
change_value = change_value[1:]
replication_ip = ''
if len(change_value) and change_value[0].isdigit():
i = 1
while (i < len(change_value) and
change_value[i] in '0123456789.'):
i += 1
replication_ip = change_value[:i]
change_value = change_value[i:]
elif len(change_value) and change_value[0] == '[':
i = 1
while i < len(change_value) and change_value[i] != ']':
i += 1
i += 1
replication_ip = \
change_value[:i].lstrip('[').rstrip(']')
change_value = change_value[i:]
if replication_ip:
change['replication_ip'] = \
validate_and_normalize_ip(replication_ip)
if change_value.startswith(':'):
i = 1
while i < len(change_value) and change_value[i].isdigit():
i += 1
change['replication_port'] = int(change_value[1:i])
change_value = change_value[i:]
if change_value.startswith('/'):
i = 1
while i < len(change_value) and change_value[i] != '_':
i += 1
change['device'] = change_value[1:i]
change_value = change_value[i:]
if change_value.startswith('_'):
change['meta'] = change_value[1:]
change_value = ''
if change_value or not change:
raise ValueError('Invalid set info change value: %s' %
repr(argvish[1]))
_set_info_values(devs, change)
else:
devs = builder.search_devs(parse_search_values_from_opts(opts))
change = parse_change_values_from_opts(opts)
_set_info_values(devs, change)
def _parse_remove_values(argvish):
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if len(args) > 0:
if new_cmd_format:
print Commands.remove.__doc__.strip()
exit(EXIT_ERROR)
for arg in args:
devs.extend(builder.search_devs(
parse_search_value(arg)) or [])
else:
devs.extend(builder.search_devs(
parse_search_values_from_opts(opts)))
return devs
except ValueError as e:
print e
exit(EXIT_ERROR)
class Commands(object):
def unknown():
print 'Unknown command: %s' % argv[2]
exit(EXIT_ERROR)
def create():
"""
swift-ring-builder <builder_file> create <part_power> <replicas>
<min_part_hours>
Creates <builder_file> with 2^<part_power> partitions and <replicas>.
    <min_part_hours> is the number of hours to restrict moving a partition
    more than once.
"""
if len(argv) < 6:
print Commands.create.__doc__.strip()
exit(EXIT_ERROR)
builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
backup_dir = pathjoin(dirname(argv[1]), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
builder.save(pathjoin(backup_dir, '%d.' % time() + basename(argv[1])))
builder.save(argv[1])
exit(EXIT_SUCCESS)
def default():
"""
swift-ring-builder <builder_file>
Shows information about the ring and the devices within.
"""
print '%s, build version %d' % (argv[1], builder.version)
regions = 0
zones = 0
balance = 0
dev_count = 0
if builder.devs:
regions = len(set(d['region'] for d in builder.devs
if d is not None))
zones = len(set((d['region'], d['zone']) for d in builder.devs
if d is not None))
dev_count = len([dev for dev in builder.devs
if dev is not None])
balance = builder.get_balance()
dispersion_trailer = '' if builder.dispersion is None else (
', %.02f dispersion' % (builder.dispersion))
print '%d partitions, %.6f replicas, %d regions, %d zones, ' \
'%d devices, %.02f balance%s' % (
builder.parts, builder.replicas, regions, zones, dev_count,
balance, dispersion_trailer)
print 'The minimum number of hours before a partition can be ' \
'reassigned is %s' % builder.min_part_hours
print 'The overload factor is %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload)
if builder.devs:
print 'Devices: id region zone ip address port ' \
'replication ip replication port name ' \
'weight partitions balance meta'
weighted_parts = builder.parts * builder.replicas / \
sum(d['weight'] for d in builder.devs if d is not None)
for dev in builder.devs:
if dev is None:
continue
if not dev['weight']:
if dev['parts']:
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / \
(dev['weight'] * weighted_parts) - 100.0
print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f '
'%10s %7.02f %s' %
(dev['id'], dev['region'], dev['zone'], dev['ip'],
dev['port'], dev['replication_ip'],
dev['replication_port'], dev['device'], dev['weight'],
dev['parts'], balance, dev['meta']))
exit(EXIT_SUCCESS)
def search():
"""
swift-ring-builder <builder_file> search <search-value>
or
swift-ring-builder <builder_file> search
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Shows information about matching devices.
"""
if len(argv) < 4:
print Commands.search.__doc__.strip()
print
print parse_search_value.__doc__.strip()
exit(EXIT_ERROR)
devs = builder.search_devs(_parse_search_values(argv[3:]))
if not devs:
print 'No matching devices found'
exit(EXIT_ERROR)
print 'Devices: id region zone ip address port ' \
'replication ip replication port name weight partitions ' \
'balance meta'
weighted_parts = builder.parts * builder.replicas / \
sum(d['weight'] for d in builder.devs if d is not None)
for dev in devs:
if not dev['weight']:
if dev['parts']:
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / \
(dev['weight'] * weighted_parts) - 100.0
print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f %10s '
'%7.02f %s' %
(dev['id'], dev['region'], dev['zone'], dev['ip'],
dev['port'], dev['replication_ip'], dev['replication_port'],
dev['device'], dev['weight'], dev['parts'], balance,
dev['meta']))
exit(EXIT_SUCCESS)
def list_parts():
"""
swift-ring-builder <builder_file> list_parts <search-value> [<search-value>] ..
or
swift-ring-builder <builder_file> list_parts
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Returns a 2 column list of all the partitions that are assigned to any of
the devices matching the search values given. The first column is the
assigned partition number and the second column is the number of device
matches for that partition. The list is ordered from most number of matches
to least. If there are a lot of devices to match against, this command
could take a while to run.
"""
if len(argv) < 4:
print Commands.list_parts.__doc__.strip()
print
print parse_search_value.__doc__.strip()
exit(EXIT_ERROR)
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % argv[1])
exit(EXIT_ERROR)
devs = _parse_list_parts_values(argv[3:])
if not devs:
print 'No matching devices found'
exit(EXIT_ERROR)
sorted_partition_count = _find_parts(devs)
if not sorted_partition_count:
print 'No matching devices found'
exit(EXIT_ERROR)
print 'Partition Matches'
for partition, count in sorted_partition_count:
print '%9d %7d' % (partition, count)
exit(EXIT_SUCCESS)
def add():
"""
swift-ring-builder <builder_file> add
[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>
[[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
<weight>] ...
Where <r_ip> and <r_port> are replication ip and port.
or
swift-ring-builder <builder_file> add
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
[--replication-ip <r_ip or r_hostname>] [--replication-port <r_port>]
--device <device_name> --weight <weight>
[--meta <meta>]
Adds devices to the ring with the given information. No partitions will be
assigned to the new device until after running 'rebalance'. This is so you
can make multiple device changes and rebalance them all just once.
"""
if len(argv) < 5:
print Commands.add.__doc__.strip()
exit(EXIT_ERROR)
try:
for new_dev in _parse_add_values(argv[3:]):
for dev in builder.devs:
if dev is None:
continue
if dev['ip'] == new_dev['ip'] and \
dev['port'] == new_dev['port'] and \
dev['device'] == new_dev['device']:
print 'Device %d already uses %s:%d/%s.' % \
(dev['id'], dev['ip'],
dev['port'], dev['device'])
print "The on-disk ring builder is unchanged.\n"
exit(EXIT_ERROR)
dev_id = builder.add_dev(new_dev)
print('Device %s with %s weight got id %s' %
(format_device(new_dev), new_dev['weight'], dev_id))
except ValueError as err:
print err
print 'The on-disk ring builder is unchanged.'
exit(EXIT_ERROR)
builder.save(argv[1])
exit(EXIT_SUCCESS)
def set_weight():
"""
swift-ring-builder <builder_file> set_weight <search-value> <weight>
    [<search-value> <weight>] ...
or
swift-ring-builder <builder_file> set_weight
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Resets the devices' weights. No partitions will be reassigned to or from
the device until after running 'rebalance'. This is so you can make
multiple device changes and rebalance them all just once.
"""
# if len(argv) < 5 or len(argv) % 2 != 1:
if len(argv) < 5:
print Commands.set_weight.__doc__.strip()
print
print parse_search_value.__doc__.strip()
exit(EXIT_ERROR)
_parse_set_weight_values(argv[3:])
builder.save(argv[1])
exit(EXIT_SUCCESS)
def set_info():
"""
swift-ring-builder <builder_file> set_info
<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>
[<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>] ...
or
swift-ring-builder <builder_file> set_info
--ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta>
--change-ip <ip or hostname> --change-port <port>
--change-replication-ip <r_ip or r_hostname>
--change-replication-port <r_port>
--change-device <device_name>
--change-meta <meta>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
For each search-value, resets the matched device's information.
This information isn't used to assign partitions, so you can use
'write_ring' afterward to rewrite the current ring with the newer
device information. Any of the parts are optional in the final
<ip>:<port>/<device_name>_<meta> parameter; just give what you
want to change. For instance set_info d74 _"snet: 5.6.7.8" would
just update the meta data for device id 74.
"""
if len(argv) < 5:
print Commands.set_info.__doc__.strip()
print
print parse_search_value.__doc__.strip()
exit(EXIT_ERROR)
try:
_parse_set_info_values(argv[3:])
except ValueError as err:
print err
exit(EXIT_ERROR)
builder.save(argv[1])
exit(EXIT_SUCCESS)
def remove():
"""
swift-ring-builder <builder_file> remove <search-value> [search-value ...]
or
        swift-ring-builder <builder_file> remove
--region <region> --zone <zone> --ip <ip or hostname> --port <port>
--replication-ip <r_ip or r_hostname> --replication-port <r_port>
--device <device_name> --meta <meta> --weight <weight>
Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname
and port.
Any of the options are optional in both cases.
Removes the device(s) from the ring. This should normally just be used for
a device that has failed. For a device you wish to decommission, it's best
to set its weight to 0, wait for it to drain all its data, then use this
remove command. This will not take effect until after running 'rebalance'.
This is so you can make multiple device changes and rebalance them all just
once.
"""
if len(argv) < 4:
print Commands.remove.__doc__.strip()
print
print parse_search_value.__doc__.strip()
exit(EXIT_ERROR)
devs = _parse_remove_values(argv[3:])
if not devs:
print('Search value matched 0 devices.\n'
'The on-disk ring builder is unchanged.')
exit(EXIT_ERROR)
if len(devs) > 1:
print 'Matched more than one device:'
for dev in devs:
print ' %s' % format_device(dev)
if raw_input('Are you sure you want to remove these %s '
'devices? (y/N) ' % len(devs)) != 'y':
print 'Aborting device removals'
exit(EXIT_ERROR)
for dev in devs:
try:
builder.remove_dev(dev['id'])
except exceptions.RingBuilderError as e:
print '-' * 79
print(
'An error occurred while removing device with id %d\n'
'This usually means that you attempted to remove\n'
'the last device in a ring. If this is the case,\n'
'consider creating a new ring instead.\n'
'The on-disk ring builder is unchanged.\n'
'Original exception message: %s' %
(dev['id'], e))
print '-' * 79
exit(EXIT_ERROR)
print '%s marked for removal and will ' \
'be removed next rebalance.' % format_device(dev)
builder.save(argv[1])
exit(EXIT_SUCCESS)
def rebalance():
"""
swift-ring-builder <builder_file> rebalance [options]
Attempts to rebalance the ring by reassigning partitions that haven't been
recently reassigned.
"""
usage = Commands.rebalance.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('-f', '--force', action='store_true',
help='Force a rebalanced ring to save even '
'if < 1% of parts changed')
parser.add_option('-s', '--seed', help="seed to use for rebalance")
parser.add_option('-d', '--debug', action='store_true',
help="print debug information")
options, args = parser.parse_args(argv)
def get_seed(index):
if options.seed:
return options.seed
try:
return args[index]
except IndexError:
pass
if options.debug:
logger = logging.getLogger("swift.ring.builder")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(stdout)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
devs_changed = builder.devs_changed
try:
last_balance = builder.get_balance()
parts, balance = builder.rebalance(seed=get_seed(3))
except exceptions.RingBuilderError as e:
print '-' * 79
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print '-' * 79
exit(EXIT_ERROR)
if not (parts or options.force):
print 'No partitions could be reassigned.'
print 'Either none need to be or none can be due to ' \
'min_part_hours [%s].' % builder.min_part_hours
exit(EXIT_WARNING)
        # If a device's weight is set to zero, the balance is reported as the
        # special value MAX_BALANCE until the zero-weighted device has given
        # up all of its partitions, so we cannot rely on the balance having
        # changed. Instead, also check whether balance or last_balance is
        # that special value.
if not options.force and \
not devs_changed and abs(last_balance - balance) < 1 and \
not (last_balance == MAX_BALANCE and balance == MAX_BALANCE):
print 'Cowardly refusing to save rebalance as it did not change ' \
'at least 1%.'
exit(EXIT_WARNING)
try:
builder.validate()
except exceptions.RingValidationError as e:
print '-' * 79
print("An error has occurred during ring validation. Common\n"
"causes of failure are rings that are empty or do not\n"
"have enough devices to accommodate the replica count.\n"
"Original exception message:\n %s" %
(e,))
print '-' * 79
exit(EXIT_ERROR)
print ('Reassigned %d (%.02f%%) partitions. '
'Balance is now %.02f. '
'Dispersion is now %.02f' % (
parts, 100.0 * parts / builder.parts,
balance,
builder.dispersion))
status = EXIT_SUCCESS
if builder.dispersion > 0:
print '-' * 79
print(
'NOTE: Dispersion of %.06f indicates some parts are not\n'
' optimally dispersed.\n\n'
' You may want to adjust some device weights, increase\n'
' the overload or review the dispersion report.' %
builder.dispersion)
status = EXIT_WARNING
print '-' * 79
elif balance > 5 and balance / 100.0 > builder.overload:
print '-' * 79
print 'NOTE: Balance of %.02f indicates you should push this ' % \
balance
print ' ring, wait at least %d hours, and rebalance/repush.' \
% builder.min_part_hours
print '-' * 79
status = EXIT_WARNING
ts = time()
builder.get_ring().save(
pathjoin(backup_dir, '%d.' % ts + basename(ring_file)))
builder.save(pathjoin(backup_dir, '%d.' % ts + basename(argv[1])))
builder.get_ring().save(ring_file)
builder.save(argv[1])
exit(status)
def dispersion():
"""
swift-ring-builder <builder_file> dispersion <search_filter> [options]
Output report on dispersion.
--verbose option will display dispersion graph broken down by tier
You can filter which tiers are evaluated to drill down using a regex
    in the optional search_filter argument.
The reports columns are:
Tier : the name of the tier
parts : the total number of partitions with assignment in the tier
% : the percentage of parts in the tier with replicas over assigned
max : maximum replicas a part should have assigned at the tier
0 - N : the number of parts with that many replicas assigned
e.g.
Tier: parts % max 0 1 2 3
r1z1 1022 79.45 1 2 210 784 28
r1z1 has 1022 total parts assigned, 79% of them have more than the
    recommended max replica count of 1 assigned. Only 2 parts in the ring
    are *not* assigned in this tier (0 replica count), 210 parts have
    the recommended replica count of 1, 784 have 2 replicas, and 28 sadly
have all three replicas in this tier.
"""
status = EXIT_SUCCESS
if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % argv[1])
exit(EXIT_ERROR)
usage = Commands.dispersion.__doc__.strip()
parser = optparse.OptionParser(usage)
parser.add_option('-v', '--verbose', action='store_true',
help='Display dispersion report for tiers')
options, args = parser.parse_args(argv)
if args[3:]:
search_filter = args[3]
else:
search_filter = None
report = dispersion_report(builder, search_filter=search_filter,
verbose=options.verbose)
print 'Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % (
builder.dispersion, builder.get_balance(), builder.overload * 100)
if report['worst_tier']:
status = EXIT_WARNING
print 'Worst tier is %.06f (%s)' % (report['max_dispersion'],
report['worst_tier'])
if report['graph']:
replica_range = range(int(math.ceil(builder.replicas + 1)))
part_count_width = '%%%ds' % max(len(str(builder.parts)), 5)
replica_counts_tmpl = ' '.join(part_count_width for i in
replica_range)
tiers = (tier for tier, _junk in report['graph'])
tier_width = max(max(map(len, tiers)), 30)
header_line = ('%-' + str(tier_width) +
's ' + part_count_width +
' %6s %6s ' + replica_counts_tmpl) % tuple(
['Tier', 'Parts', '%', 'Max'] + replica_range)
underline = '-' * len(header_line)
print(underline)
print(header_line)
print(underline)
for tier_name, dispersion in report['graph']:
replica_counts_repr = replica_counts_tmpl % tuple(
dispersion['replicas'])
print ('%-' + str(tier_width) + 's ' + part_count_width +
' %6.02f %6d %s') % (tier_name,
dispersion['placed_parts'],
dispersion['dispersion'],
dispersion['max_replicas'],
replica_counts_repr,
)
exit(status)
def validate():
"""
swift-ring-builder <builder_file> validate
Just runs the validation routines on the ring.
"""
builder.validate()
exit(EXIT_SUCCESS)
def write_ring():
"""
swift-ring-builder <builder_file> write_ring
Just rewrites the distributable ring file. This is done automatically after
a successful rebalance, so really this is only useful after one or more
'set_info' calls when no rebalance is needed but you want to send out the
new device information.
"""
ring_data = builder.get_ring()
if not ring_data._replica2part2dev_id:
if ring_data.devs:
print 'Warning: Writing a ring with no partition ' \
'assignments but with devices; did you forget to run ' \
'"rebalance"?'
else:
print 'Warning: Writing an empty ring'
ring_data.save(
pathjoin(backup_dir, '%d.' % time() + basename(ring_file)))
ring_data.save(ring_file)
exit(EXIT_SUCCESS)
def write_builder():
"""
swift-ring-builder <ring_file> write_builder [min_part_hours]
Recreate a builder from a ring file (lossy) if you lost your builder
backups. (Protip: don't lose your builder backups).
[min_part_hours] is one of those numbers lost to the builder,
you can change it with set_min_part_hours.
"""
if exists(builder_file):
print 'Cowardly refusing to overwrite existing ' \
'Ring Builder file: %s' % builder_file
exit(EXIT_ERROR)
if len(argv) > 3:
min_part_hours = int(argv[3])
else:
stderr.write("WARNING: default min_part_hours may not match "
"the value in the lost builder.\n")
min_part_hours = 24
ring = Ring(ring_file)
for dev in ring.devs:
dev.update({
'parts': 0,
'parts_wanted': 0,
})
builder_dict = {
'part_power': 32 - ring._part_shift,
'replicas': float(ring.replica_count),
'min_part_hours': min_part_hours,
'parts': ring.partition_count,
'devs': ring.devs,
'devs_changed': False,
'version': 0,
'_replica2part2dev': ring._replica2part2dev_id,
'_last_part_moves_epoch': None,
'_last_part_moves': None,
'_last_part_gather_start': 0,
'_remove_devs': [],
}
builder = RingBuilder.from_dict(builder_dict)
for parts in builder._replica2part2dev:
for dev_id in parts:
builder.devs[dev_id]['parts'] += 1
builder._set_parts_wanted()
builder.save(builder_file)
def pretend_min_part_hours_passed():
"""
swift-ring-builder <builder_file> pretend_min_part_hours_passed
Resets the clock on the last time a rebalance happened, thus
circumventing the min_part_hours check. USE WITH EXTREME CAUTION.
"""
builder.pretend_min_part_hours_passed()
builder.save(argv[1])
exit(EXIT_SUCCESS)
def set_min_part_hours():
"""
swift-ring-builder <builder_file> set_min_part_hours <hours>
Changes the <min_part_hours> to the given <hours>. This should be set to
however long a full replication/update cycle takes. We're working on a way
to determine this more easily than scanning logs.
"""
if len(argv) < 4:
print Commands.set_min_part_hours.__doc__.strip()
exit(EXIT_ERROR)
builder.change_min_part_hours(int(argv[3]))
print 'The minimum number of hours before a partition can be ' \
'reassigned is now set to %s' % argv[3]
builder.save(argv[1])
exit(EXIT_SUCCESS)
def set_replicas():
"""
swift-ring-builder <builder_file> set_replicas <replicas>
Changes the replica count to the given <replicas>. <replicas> may
be a floating-point value, in which case some partitions will have
floor(<replicas>) replicas and some will have ceiling(<replicas>)
in the correct proportions.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print Commands.set_replicas.__doc__.strip()
exit(EXIT_ERROR)
new_replicas = argv[3]
try:
new_replicas = float(new_replicas)
except ValueError:
print Commands.set_replicas.__doc__.strip()
print "\"%s\" is not a valid number." % new_replicas
exit(EXIT_ERROR)
if new_replicas < 1:
print "Replica count must be at least 1."
exit(EXIT_ERROR)
builder.set_replicas(new_replicas)
print 'The replica count is now %.6f.' % builder.replicas
print 'The change will take effect after the next rebalance.'
builder.save(argv[1])
exit(EXIT_SUCCESS)
def set_overload():
"""
swift-ring-builder <builder_file> set_overload <overload>[%]
Changes the overload factor to the given <overload>.
A rebalance is needed to make the change take effect.
"""
if len(argv) < 4:
print Commands.set_overload.__doc__.strip()
exit(EXIT_ERROR)
new_overload = argv[3]
if new_overload.endswith('%'):
percent = True
new_overload = new_overload.rstrip('%')
else:
percent = False
try:
new_overload = float(new_overload)
except ValueError:
print Commands.set_overload.__doc__.strip()
print "%r is not a valid number." % new_overload
exit(EXIT_ERROR)
if percent:
new_overload *= 0.01
if new_overload < 0:
print "Overload must be non-negative."
exit(EXIT_ERROR)
if new_overload > 1 and not percent:
print "!?! Warning overload is greater than 100% !?!"
status = EXIT_WARNING
else:
status = EXIT_SUCCESS
builder.set_overload(new_overload)
print 'The overload factor is now %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload)
print 'The change will take effect after the next rebalance.'
builder.save(argv[1])
exit(status)
def main(arguments=None):
global argv, backup_dir, builder, builder_file, ring_file
if arguments:
argv = arguments
else:
argv = sys_argv
if len(argv) < 2:
print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \
globals()
print Commands.default.__doc__.strip()
print
cmds = [c for c, f in Commands.__dict__.iteritems()
if f.__doc__ and c[0] != '_' and c != 'default']
cmds.sort()
for cmd in cmds:
print Commands.__dict__[cmd].__doc__.strip()
print
print parse_search_value.__doc__.strip()
print
for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
subsequent_indent=' '):
print line
print('Exit codes: 0 = operation successful\n'
' 1 = operation completed with warnings\n'
' 2 = error')
exit(EXIT_SUCCESS)
builder_file, ring_file = parse_builder_ring_filename_args(argv)
try:
builder = RingBuilder.load(builder_file)
except exceptions.UnPicklingError as e:
print e
exit(EXIT_ERROR)
except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
print e
exit(EXIT_ERROR)
except Exception as e:
print('Problem occurred while reading builder file: %s. %s' %
(argv[1], e))
exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(argv[1]), 'backups')
try:
mkdir(backup_dir)
except OSError as err:
if err.errno != EEXIST:
raise
if len(argv) == 2:
command = "default"
else:
command = argv[2]
if argv[0].endswith('-safe'):
try:
with lock_parent_directory(abspath(argv[1]), 15):
Commands.__dict__.get(command, Commands.unknown.im_func)()
except exceptions.LockTimeout:
print "Ring/builder dir currently locked."
exit(2)
else:
Commands.__dict__.get(command, Commands.unknown.im_func)()
if __name__ == '__main__':
main()
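# Example session (illustrative, not part of this file): typical invocations
# of the commands defined above. Paths and device parameters are placeholders.
#
#   swift-ring-builder object.builder create 10 3 24
#   swift-ring-builder object.builder add r1z1-192.168.1.1:6000/sdb1 100
#   swift-ring-builder object.builder rebalance
#   swift-ring-builder object.builder dispersion -v
#   swift-ring-builder object.ring.gz write_builder 24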
r"""
Implements turbulence closure model stability functions.
.. math::
S_m &= S_m(\alpha_M, \alpha_N) \\
S_\rho &= S_\rho(\alpha_M, \alpha_N)
where :math:`\alpha_M, \alpha_N` are the normalized shear and buoyancy frequency
.. math::
\alpha_M &= \frac{k^2}{\varepsilon^2} M^2 \\
\alpha_N &= \frac{k^2}{\varepsilon^2} N^2
The following stability functions have been implemented
- Canuto A
- Canuto B
- Kantha-Clayson
- Cheng
References:
Umlauf, L. and Burchard, H. (2005). Second-order turbulence closure models
for geophysical boundary layers. A review of recent work. Continental Shelf
Research, 25(7-8):795--827. http://dx.doi.org/10.1016/j.csr.2004.08.004
Burchard, H. and Bolding, K. (2001). Comparative Analysis of Four
Second-Moment Turbulence Closure Models for the Oceanic Mixed Layer. Journal of
Physical Oceanography, 31(8):1943--1968.
http://dx.doi.org/10.1175/1520-0485(2001)031
Umlauf, L. and Burchard, H. (2003). A generic length-scale equation for
geophysical turbulence models. Journal of Marine Research, 61:235--265(31).
http://dx.doi.org/10.1357/002224003322005087
"""
from __future__ import absolute_import
import numpy as np
from abc import ABC, abstractmethod
from scipy.optimize import minimize
from .log import print_output
__all__ = [
'StabilityFunctionBase',
'StabilityFunctionCanutoA',
'StabilityFunctionCanutoB',
'StabilityFunctionCheng',
'GOTMStabilityFunctionCanutoA',
'GOTMStabilityFunctionCanutoB',
'GOTMStabilityFunctionCheng',
'GOTMStabilityFunctionKanthaClayson',
'compute_normalized_frequencies'
]
def compute_normalized_frequencies(shear2, buoy2, k, eps, verbose=False):
r"""
Computes normalized buoyancy and shear frequency squared.
.. math::
\alpha_M &= \frac{k^2}{\varepsilon^2} M^2 \\
\alpha_N &= \frac{k^2}{\varepsilon^2} N^2
From Burchard and Bolding (2001).
:arg shear2: :math:`M^2`
:arg buoy2: :math:`N^2`
:arg k: turbulent kinetic energy
:arg eps: TKE dissipation rate
"""
alpha_buoy = k**2/eps**2*buoy2
alpha_shear = k**2/eps**2*shear2
if verbose:
print_output('{:8s} {:8.3e} {:8.3e}'.format('M2', shear2.min(), shear2.max()))
print_output('{:8s} {:8.3e} {:8.3e}'.format('N2', buoy2.min(), buoy2.max()))
print_output('{:8s} {:10.3e} {:10.3e}'.format('a_buoy', alpha_buoy.min(), alpha_buoy.max()))
print_output('{:8s} {:10.3e} {:10.3e}'.format('a_shear', alpha_shear.min(), alpha_shear.max()))
return alpha_buoy, alpha_shear
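# Worked example (illustrative): with typical mixed-layer values
# k = 1e-4 m2/s2, eps = 1e-7 m2/s3 and M2 = N2 = 1e-5 s-2, the prefactor is
# k**2/eps**2 = 1e-8/1e-14 = 1e6 s2, so alpha_M = alpha_N = 1e6 * 1e-5 = 10.
#
#   a_buoy, a_shear = compute_normalized_frequencies(
#       np.array([1.0e-5]), np.array([1.0e-5]), 1.0e-4, 1.0e-7)
#   # a_buoy == a_shear == array([10.])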
class StabilityFunctionBase(ABC):
"""
Base class for all stability functions
"""
@property
@abstractmethod
def name(self):
pass
def __init__(self, lim_alpha_shear=True, lim_alpha_buoy=True,
smooth_alpha_buoy_lim=True, alpha_buoy_crit=-1.2):
r"""
:kwarg bool lim_alpha_shear: limit maximum :math:`\alpha_M` values
(see Umlauf and Burchard (2005) eq. 44)
:kwarg bool lim_alpha_buoy: limit minimum (negative) :math:`\alpha_N` values
(see Umlauf and Burchard (2005))
:kwarg bool smooth_alpha_buoy_lim: if :math:`\alpha_N` is limited, apply a
smooth limiter (see Burchard and Bolding (2001) eq. 19). Otherwise
:math:`\alpha_N` is clipped at minimum value.
:kwarg float alpha_buoy_crit: parameter for :math:`\alpha_N` smooth limiter
"""
self.lim_alpha_shear = lim_alpha_shear
self.lim_alpha_buoy = lim_alpha_buoy
self.smooth_alpha_buoy_lim = smooth_alpha_buoy_lim
self.alpha_buoy_crit = alpha_buoy_crit
# for plotting and such
self.description = []
if self.lim_alpha_shear:
self.description += ['lim', 'shear']
if self.lim_alpha_buoy:
self.description += ['lim', 'alpha']
if self.smooth_alpha_buoy_lim:
self.description += ['smooth']
self.description += ['ac'+str(self.alpha_buoy_crit)]
self.description = ' '.join(self.description)
# denominator
self.d0 = 1.0
self.d1 = 1.0
self.d2 = 1.0
self.d3 = 1.0
self.d4 = 1.0
self.d5 = 1.0
# c_mu
self.n0 = 0.0
self.n1 = 0.0
self.n2 = 0.0
# c_mu_p
self.nb0 = 0.0
self.nb1 = 0.0
self.nb2 = 0.0
def compute_alpha_shear_steady(self, ri_st, analytical=True):
r"""
Compute the steady-state :math:`\alpha_M`.
Under steady-state conditions, the stability functions satisfy:
.. math::
S_m \alpha_M - S_\rho \alpha_M Ri_{st} = 1.0
(Umlauf and Burchard, 2005, eq A.15) from which :math:`\alpha_M` can be
solved for given gradient Richardson number :math:`Ri_{st}`.
:arg float ri_st: Gradient Richardson number
:kwarg bool analytical: If True (default), solve analytically using the
coefficients of the stability function. Otherwise, solve
:math:`\alpha_M` numerically from the equilibrium condition.
"""
if not analytical:
# A) solve numerically
# use equilibrium equation (Umlauf and Burchard, 2005, eq A.15)
# s_m*a_shear - s_h*a_shear*ri_st = 1.0
# to solve a_shear at equilibrium
# NOTE may fail/return incorrect solution for ri_st < -4
def cost(a_shear):
a_buoy = ri_st*a_shear
s_m, s_h = self.eval_funcs(a_buoy, a_shear)
res = s_m*a_shear - s_h*a_buoy - 1.0
return res**2
p = minimize(cost, 1.0)
assert p.success, 'solving alpha_shear failed, Ri_st={:}'.format(ri_st)
a_shear = p.x[0]
else:
# B) solve analytically
# compute alpha_shear for equilibrium condition (Umlauf and Burchard, 2005, eq A.19)
# aM^2 (-d5 + n2 - (d3 - n1 + nb2)Ri - (d4 + nb1)Ri^2) + aM (-d2 + n0 - (d1 + nb0)Ri) - d0 = 0
# NOTE this is the more robust method
a = -self.d5 + self.n2 - (self.d3 - self.n1 + self.nb2)*ri_st - (self.d4 + self.nb1)*ri_st**2
b = -self.d2 + self.n0 - (self.d1 + self.nb0)*ri_st
c = -self.d0
# take the root of the quadratic that yields a non-negative alpha_M
a_shear = (-b + np.sqrt(b**2 - 4*a*c))/2/a
return a_shear
def compute_c3_minus(self, c1, c2, ri_st):
r"""
Compute c3_minus parameter from c1 and c2 parameters.
c3_minus is solved from equation
.. math::
Ri_{st} = \frac{s_m}{s_h} \frac{c2 - c1}{c2 - c3_{-}}
where :math:`Ri_{st}` is the steady state gradient Richardson number.
(see Burchard and Bolding, 2001, eq 32)
"""
a_shear = self.compute_alpha_shear_steady(ri_st, analytical=False)
# compute aN from Ri_st and aM, Ri_st = aN/aM
a_buoy = ri_st*a_shear
# evaluate stability functions for equilibrium conditions
s_m, s_h = self.eval_funcs(a_buoy, a_shear)
# compute c3 minus from Umlauf and Burchard (2005)
c3_minus = c2 - (c2 - c1)*s_m/s_h/ri_st
# check error in ri_st
err = s_m/s_h*(c2 - c1)/(c2 - c3_minus) - ri_st
assert np.abs(err) < 1e-5, 'steady state gradient Richardson number does not match'
return c3_minus
def compute_cmu0(self, analytical=True):
r"""
Compute parameter :math:`c_\mu^0`
See: Umlauf and Burchard (2005) eq A.22
:kwarg bool analytical: If True (default), solve analytically using the
coefficients of the stability function. Otherwise, solve
:math:`\alpha_M` numerically from the equilibrium condition.
"""
a_buoy = 0.0
if analytical:
a = self.d5 - self.n2
b = self.d2 - self.n0
c = self.d0
a_shear = (-b - np.sqrt(b**2 - 4*a*c))/2/a
s_m, s_h = self.eval_funcs(a_buoy, a_shear)
cm0 = s_m**0.25
else:
def cost(a_shear):
s_m, s_h = self.eval_funcs(a_buoy, a_shear)
res = s_m*a_shear - 1.0
return res**2
p = minimize(cost, 1.0)
assert p.success, 'solving alpha_shear failed'
a_shear = p.x[0]
s_m, s_h = self.eval_funcs(a_buoy, a_shear)
cm0 = s_m**0.25
return cm0
def compute_kappa(self, sigma_psi, cmu0, n, c1, c2):
r"""
Computes von Karman constant from the Psi Schmidt number.
See: Umlauf and Burchard (2003) eq (14)
:arg sigma_psi: Psi Schmidt number
:arg cmu0, n, c1, c2: GLS model parameters
"""
return cmu0 / np.abs(n) * np.sqrt(sigma_psi * (c2 - c1))
def compute_sigma_psi(self, kappa, cmu0, n, c1, c2):
r"""
Computes the Psi Schmidt number from von Karman constant.
See: Umlauf and Burchard (2003) eq (14)
:arg kappa: von Karman constant
:arg cmu0, n, c1, c2: GLS model parameters
"""
return (n * kappa)**2 / (cmu0**2 * (c2 - c1))
def compute_length_clim(self, cmu0, ri_st):
r"""
Computes the Galperin length-scale limit.
:arg cmu0: parameter :math:`c_\mu^0`
:arg ri_st: gradient Richardson number
"""
a_shear = self.compute_alpha_shear_steady(ri_st)
# compute aN from Ri_st and aM, Ri_st = aN/aM
a_buoy = ri_st*a_shear
clim = cmu0**3.0 * np.sqrt(a_buoy/2)
return clim
def get_alpha_buoy_min(self):
r"""
Compute minimum normalized buoyancy frequency :math:`\alpha_N`
See: Umlauf and Burchard (2005), Table 3
"""
# G = epsilon case, this is used in GOTM
an_min = 0.5*(np.sqrt((self.d1 + self.nb0)**2. - 4.*self.d0*(self.d4 + self.nb1)) - (self.d1 + self.nb0))/(self.d4 + self.nb1)
# alternative: Umlauf and Burchard (2005) eq. (47) limit
# an_min = self.alpha_buoy_min
return an_min
def get_alpha_shear_max(self, alpha_buoy, alpha_shear):
r"""
Compute maximum normalized shear frequency :math:`\alpha_M`
from Umlauf and Burchard (2005) eq (44)
:arg alpha_buoy: normalized buoyancy frequency :math:`\alpha_N`
:arg alpha_shear: normalized shear frequency :math:`\alpha_M`
"""
as_max_n = (self.d0*self.n0
+ (self.d0*self.n1 + self.d1*self.n0)*alpha_buoy
+ (self.d1*self.n1 + self.d4*self.n0)*alpha_buoy**2
+ self.d4*self.n1*alpha_buoy**3)
as_max_d = (self.d2*self.n0
+ (self.d2*self.n1 + self.d3*self.n0)*alpha_buoy
+ self.d3*self.n1*alpha_buoy**2)
return as_max_n/as_max_d
def get_alpha_buoy_smooth_min(self, alpha_buoy):
r"""
Compute smoothed minimum for normalized buoyancy frequency
See: Burchard and Petersen (1999), eq (19)
:arg alpha_buoy: normalized buoyancy frequency :math:`\alpha_N`
"""
return alpha_buoy - (alpha_buoy - self.alpha_buoy_crit)**2/(alpha_buoy + self.get_alpha_buoy_min() - 2*self.alpha_buoy_crit)
def eval_funcs(self, alpha_buoy, alpha_shear):
r"""
Evaluate (unlimited) stability functions
See: Burchard and Petersen (1999) eqns (30) and (31)
:arg alpha_buoy: normalized buoyancy frequency :math:`\alpha_N`
:arg alpha_shear: normalized shear frequency :math:`\alpha_M`
:returns: :math:`S_m`, :math:`S_\rho`
"""
den = self.d0 + self.d1*alpha_buoy + self.d2*alpha_shear + self.d3*alpha_buoy*alpha_shear + self.d4*alpha_buoy**2 + self.d5*alpha_shear**2
c_mu = (self.n0 + self.n1*alpha_buoy + self.n2*alpha_shear) / den
c_mu_p = (self.nb0 + self.nb1*alpha_buoy + self.nb2*alpha_shear) / den
return c_mu, c_mu_p
def evaluate(self, shear2, buoy2, k, eps):
r"""
Evaluate stability functions from dimensional variables.
Applies limiters on :math:`\alpha_N` and :math:`\alpha_M`.
:arg shear2: shear frequency squared, :math:`M^2`
:arg buoy2: buoyancy frequency squared, :math:`N^2`
:arg k: turbulent kinetic energy, :math:`k`
:arg eps: TKE dissipation rate, :math:`\varepsilon`
"""
alpha_buoy, alpha_shear = compute_normalized_frequencies(shear2, buoy2, k, eps)
if self.lim_alpha_buoy:
# limit min (negative) alpha_buoy (Umlauf and Burchard, 2005)
if not self.smooth_alpha_buoy_lim:
# crop at minimum value
np.maximum(alpha_buoy, self.get_alpha_buoy_min(), alpha_buoy)
else:
# do smooth limiting instead (Burchard and Petersen, 1999, eq 19)
ab_smooth_min = self.get_alpha_buoy_smooth_min(alpha_buoy)
# NOTE this must be applied to values alpha_buoy < ab_crit only!
ix = alpha_buoy < self.alpha_buoy_crit
alpha_buoy[ix] = ab_smooth_min[ix]
if self.lim_alpha_shear:
# limit max alpha_shear (Umlauf and Burchard, 2005, eq 44)
as_max = self.get_alpha_shear_max(alpha_buoy, alpha_shear)
np.minimum(alpha_shear, as_max, alpha_shear)
return self.eval_funcs(alpha_buoy, alpha_shear)
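# Limiter sketch (illustrative): with the default alpha_buoy_crit = -1.2 and
# an assumed get_alpha_buoy_min() of -2.0, an input alpha_buoy = -3.0 maps to
#   -3.0 - (-3.0 + 1.2)**2 / (-3.0 + (-2.0) - 2*(-1.2))
#   = -3.0 - 3.24 / (-2.6) ~ -1.75,
# and as alpha_buoy -> -inf the smoothed value tends to get_alpha_buoy_min(),
# so strongly unstable values are limited smoothly rather than clipped.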
class GOTMStabilityFunctionBase(StabilityFunctionBase, ABC):
"""
Base class for stability functions defined in Umlauf and Burchard (2005)
"""
@property
@abstractmethod
def cc1(self):
pass
@property
@abstractmethod
def cc2(self):
pass
@property
@abstractmethod
def cc3(self):
pass
@property
@abstractmethod
def cc4(self):
pass
@property
@abstractmethod
def cc5(self):
pass
@property
@abstractmethod
def cc6(self):
pass
@property
@abstractmethod
def cb1(self):
pass
@property
@abstractmethod
def cb2(self):
pass
@property
@abstractmethod
def cb3(self):
pass
@property
@abstractmethod
def cb4(self):
pass
@property
@abstractmethod
def cb5(self):
pass
@property
@abstractmethod
def cbb(self):
pass
def __init__(self, lim_alpha_shear=True, lim_alpha_buoy=True,
smooth_alpha_buoy_lim=True, alpha_buoy_crit=-1.2):
r"""
:kwarg bool lim_alpha_shear: limit maximum :math:`\alpha_M` values
(see Umlauf and Burchard (2005) eq. 44)
:kwarg bool lim_alpha_buoy: limit minimum (negative) :math:`\alpha_N` values
(see Umlauf and Burchard (2005))
:kwarg bool smooth_alpha_buoy_lim: if :math:`\alpha_N` is limited, apply a
smooth limiter (see Burchard and Bolding (2001) eq. 19). Otherwise
:math:`\alpha_N` is clipped at minimum value.
:kwarg float alpha_buoy_crit: parameter for :math:`\alpha_N` smooth limiter
"""
super().__init__(lim_alpha_shear, lim_alpha_buoy,
smooth_alpha_buoy_lim, alpha_buoy_crit)
# Umlauf and Burchard (2005) eq A.10
self.a1 = 2.0/3.0 - 0.5*self.cc2
self.a2 = 1.0 - 0.5*self.cc3
self.a3 = 1.0 - 0.5*self.cc4
self.a4 = 0.5*self.cc5
self.a5 = 0.5 - 0.5*self.cc6
self.ab1 = 1.0 - self.cb2
self.ab2 = 1.0 - self.cb3
self.ab3 = 2.0*(1.0 - self.cb4)
self.ab4 = 2.0*(1.0 - self.cb5)
self.ab5 = 2.0*self.cbb*(1.0 - self.cb5)
# Umlauf and Burchard (2005) eq A.12
self.nn = 0.5*self.cc1
self.nb = self.cb1
# Umlauf and Burchard (2005) eq A.9
self.d0 = 36.0*self.nn**3*self.nb**2
self.d1 = 84.0*self.a5*self.ab3*self.nn**2*self.nb + 36.0*self.ab5*self.nn**3*self.nb
self.d2 = 9.0*(self.ab2**2 - self.ab1**2)*self.nn**3 - 12.0*(self.a2**2 - 3.0*self.a3**2)*self.nn*self.nb**2
self.d3 = 12.0*self.a5*self.ab3*(self.a2*self.ab1 - 3.0*self.a3*self.ab2)*self.nn +\
12.0*self.a5*self.ab3*(self.a3**2 - self.a2**2)*self.nb +\
12.0*self.ab5*(3.0*self.a3**2 - self.a2**2)*self.nn*self.nb
self.d4 = 48.0*self.a5**2*self.ab3**2*self.nn + 36.0*self.a5*self.ab3*self.ab5*self.nn**2
self.d5 = 3.0*(self.a2**2 - 3.0*self.a3**2)*(self.ab1**2 - self.ab2**2)*self.nn
self.n0 = 36.0*self.a1*self.nn**2*self.nb**2
self.n1 = -12.0*self.a5*self.ab3*(self.ab1 + self.ab2)*self.nn**2 +\
8.0*self.a5*self.ab3*(6.0*self.a1 - self.a2 - 3.0*self.a3)*self.nn*self.nb +\
36.0*self.a1*self.ab5*self.nn**2*self.nb
self.n2 = 9.0*self.a1*(self.ab2**2 - self.ab1**2)*self.nn**2
self.nb0 = 12.0*self.ab3*self.nn**3*self.nb
self.nb1 = 12.0*self.a5*self.ab3**2*self.nn**2
self.nb2 = 9.0*self.a1*self.ab3*(self.ab1 - self.ab2)*self.nn**2 +\
(6.0*self.a1*(self.a2 - 3.0*self.a3) - 4.0*(self.a2**2 - 3.0*self.a3**2))*self.ab3*self.nn*self.nb
class CanutoStabilityFunctionBase(StabilityFunctionBase, ABC):
"""
Base class for original Canuto stability function.
"""
@property
@abstractmethod
def l1(self):
pass
@property
@abstractmethod
def l2(self):
pass
@property
@abstractmethod
def l3(self):
pass
@property
@abstractmethod
def l4(self):
pass
@property
@abstractmethod
def l5(self):
pass
@property
@abstractmethod
def l6(self):
pass
@property
@abstractmethod
def l7(self):
pass
@property
@abstractmethod
def l8(self):
pass
def __init__(self, lim_alpha_shear=True, lim_alpha_buoy=True,
smooth_alpha_buoy_lim=True, alpha_buoy_crit=-1.2):
r"""
:kwarg bool lim_alpha_shear: limit maximum :math:`\alpha_M` values
(see Umlauf and Burchard (2005) eq. 44)
:kwarg bool lim_alpha_buoy: limit minimum (negative) :math:`\alpha_N` values
(see Umlauf and Burchard (2005))
:kwarg bool smooth_alpha_buoy_lim: if :math:`\alpha_N` is limited, apply a
smooth limiter (see Burchard and Bolding (2001) eq. 19). Otherwise
:math:`\alpha_N` is clipped at minimum value.
:kwarg float alpha_buoy_crit: parameter for :math:`\alpha_N` smooth limiter
"""
super().__init__(lim_alpha_shear, lim_alpha_buoy,
smooth_alpha_buoy_lim, alpha_buoy_crit)
self.s0 = 1.5*self.l1*self.l5**2
self.s1 = -self.l4*(self.l6 + self.l7) + 2*self.l4*self.l5*(self.l1 - self.l2/3.0 - self.l3) + 1.5*self.l1*self.l5*self.l8
self.s2 = -3.0/8*self.l1*(self.l6**2 - self.l7**2)
self.s4 = 2*self.l5
self.s5 = 2*self.l4
self.s6 = 2.0/3*self.l5*(3*self.l3**2 - self.l2**2) - 0.5*self.l5*self.l1*(3*self.l3 - self.l2) + 0.75*self.l1*(self.l6 - self.l7)
self.dd0 = 3*self.l5**2
self.dd1 = self.l5*(7*self.l4 + 3*self.l8)
self.dd2 = self.l5**2*(3*self.l3**2 - self.l2**2) - 0.75*(self.l6**2 - self.l7**2)
self.dd3 = self.l4*(4*self.l4 + 3*self.l8)
self.dd5 = 0.25*(self.l2**2 - 3*self.l3**2)*(self.l6**2 - self.l7**2)
self.dd4 = self.l4*(self.l2*self.l6 - 3*self.l3*self.l7 - self.l5*(self.l2**2 - self.l3**2)) + self.l5*self.l8*(3*self.l3**2 - self.l2**2)
# unit conversion
self.alpha_scalar = 4
self.cu_scalar = 2
self.d0 = self.dd0
self.d1 = self.alpha_scalar*self.dd1
self.d2 = self.alpha_scalar*self.dd2
self.d3 = self.alpha_scalar**2*self.dd4
self.d4 = self.alpha_scalar**2*self.dd3
self.d5 = self.alpha_scalar**2*self.dd5
self.n0 = self.cu_scalar*self.s0
self.n1 = self.cu_scalar*self.alpha_scalar*self.s1
self.n2 = self.cu_scalar*self.alpha_scalar*self.s2
self.nb0 = self.cu_scalar*self.s4
self.nb1 = self.cu_scalar*self.alpha_scalar*self.s5
self.nb2 = self.cu_scalar*self.alpha_scalar*self.s6
def eval_funcs_new(self, alpha_buoy, alpha_shear):
r"""
Evaluate (unlimited) stability functions
From Canuto et al (2001)
:arg alpha_buoy: normalized buoyancy frequency :math:`\alpha_N`
:arg alpha_shear: normalized shear frequency :math:`\alpha_M`
"""
tN2 = self.alpha_scalar*alpha_buoy
tS2 = self.alpha_scalar*alpha_shear
dsm = self.s0 + self.s1*tN2 + self.s2*tS2
dsh = self.s4 + self.s5*tN2 + self.s6*tS2
den = self.dd0 + self.dd1*tN2 + self.dd2*tS2 + self.dd3*tN2**2 + self.dd4*tN2*tS2 + self.dd5*tS2**2
sm = self.cu_scalar*dsm/den
sh = self.cu_scalar*dsh/den
return sm, sh
class ChengStabilityFunctionBase(StabilityFunctionBase):
"""
Base class for original Cheng stability function.
"""
@property
@abstractmethod
def l1(self):
pass
@property
@abstractmethod
def l2(self):
pass
@property
@abstractmethod
def l3(self):
pass
@property
@abstractmethod
def l4(self):
pass
@property
@abstractmethod
def l5(self):
pass
@property
@abstractmethod
def l6(self):
pass
@property
@abstractmethod
def l7(self):
pass
@property
@abstractmethod
def l8(self):
pass
def __init__(self, lim_alpha_shear=True, lim_alpha_buoy=True,
smooth_alpha_buoy_lim=True, alpha_buoy_crit=-1.2):
r"""
:kwarg bool lim_alpha_shear: limit maximum :math:`\alpha_M` values
(see Umlauf and Burchard (2005) eq. 44)
:kwarg bool lim_alpha_buoy: limit minimum (negative) :math:`\alpha_N` values
(see Umlauf and Burchard (2005))
:kwarg bool smooth_alpha_buoy_lim: if :math:`\alpha_N` is limited, apply a
smooth limiter (see Burchard and Bolding (2001) eq. 19). Otherwise
:math:`\alpha_N` is clipped at minimum value.
:kwarg float alpha_buoy_crit: parameter for :math:`\alpha_N` smooth limiter
"""
super().__init__(lim_alpha_shear, lim_alpha_buoy,
smooth_alpha_buoy_lim, alpha_buoy_crit)
self.s0 = 0.5*self.l1
self.s1 = -1.0/3*self.l4*self.l5**-2*(self.l6 + self.l7) + 2.0/3*self.l4/self.l5*(self.l1 - 1.0/3*self.l2 - self.l3) + 0.5*self.l1/self.l5*self.l8
self.s2 = 1.0/8*self.l1*self.l5**-2*(self.l6**2 - self.l7**2)
self.s4 = 2.0/3/self.l5
self.s5 = 2.0/3*self.l4*self.l5**-2
self.s6 = 2.0/3/self.l5*(self.l3**2 - 1.0/3*self.l2**2) - 0.5*self.l1/self.l5*(self.l3 - 1.0/3*self.l2)
self.dd0 = 1
self.dd1 = (7.0/3*self.l4 + self.l8)/self.l5
self.dd2 = (self.l3**2 - 1.0/3*self.l2**2) - 0.25*self.l5**-2*(self.l6**2 - self.l7**2)
self.dd3 = 1.0/3*self.l4*self.l5**-2*(4*self.l4 + 3*self.l8)
self.dd4 = 1.0/3*self.l4*self.l5**-2*(self.l2*self.l6 - 3*self.l3*self.l7 - self.l5*(self.l2**2 - self.l3**2)) + self.l8*(self.l3**2 - 1.0/3*self.l2**2)/self.l5
self.dd5 = -1.0/4*self.l5**-2*(self.l3**2 - 1.0/3*self.l2**2)*(self.l6**2 - self.l7**2)
# unit conversion
self.alpha_scalar = 4
self.cu_scalar = 2
self.d0 = self.dd0
self.d1 = self.alpha_scalar*self.dd1
self.d2 = self.alpha_scalar*self.dd2
self.d3 = self.alpha_scalar**2*self.dd4
self.d4 = self.alpha_scalar**2*self.dd3
self.d5 = self.alpha_scalar**2*self.dd5
self.n0 = self.cu_scalar*self.s0
self.n1 = self.cu_scalar*self.alpha_scalar*self.s1
self.n2 = self.cu_scalar*self.alpha_scalar*self.s2
self.nb0 = self.cu_scalar*self.s4
self.nb1 = self.cu_scalar*self.alpha_scalar*self.s5
self.nb2 = self.cu_scalar*self.alpha_scalar*self.s6
def eval_funcs_new(self, alpha_buoy, alpha_shear):
r"""
Evaluate (unlimited) stability functions
Following the formulation of Canuto et al (2001) with Cheng coefficients
:arg alpha_buoy: normalized buoyancy frequency :math:`\alpha_N`
:arg alpha_shear: normalized shear frequency :math:`\alpha_M`
"""
tN2 = self.alpha_scalar*alpha_buoy
tS2 = self.alpha_scalar*alpha_shear
dsm = self.s0 + self.s1*tN2 + self.s2*tS2
dsh = self.s4 + self.s5*tN2 + self.s6*tS2
den = self.dd0 + self.dd1*tN2 + self.dd2*tS2 + self.dd3*tN2**2 + self.dd4*tN2*tS2 + self.dd5*tS2**2
sm = self.cu_scalar*dsm/den
sh = self.cu_scalar*dsh/den
return sm, sh
class StabilityFunctionCanutoA(CanutoStabilityFunctionBase):
"""
Canuto A stability function as defined in the Canuto (2001) paper.
"""
l1 = 0.107
l2 = 0.0032
l3 = 0.0864
l4 = 0.12
l5 = 11.9
l6 = 0.4
l7 = 0
l8 = 0.48
name = 'Canuto A'
class StabilityFunctionCanutoB(CanutoStabilityFunctionBase):
"""
Canuto B stability function as defined in the Canuto (2001) paper.
"""
l1 = 0.127
l2 = 0.00336
l3 = 0.0906
l4 = 0.101
l5 = 11.2
l6 = 0.4
l7 = 0
l8 = 0.318
name = 'Canuto B'
class StabilityFunctionCheng(ChengStabilityFunctionBase):
"""
Cheng stability function as defined in the Cheng et al (2002) paper.
"""
l1 = 0.107
l2 = 0.0032
l3 = 0.0864
l4 = 0.1
l5 = 11.04
l6 = 0.786
l7 = 0.643
l8 = 0.547
name = 'Cheng'
class GOTMStabilityFunctionCanutoA(GOTMStabilityFunctionBase):
"""
Canuto et al. (2001) version A stability functions
Parameters are from Umlauf and Burchard (2005), Table 1
"""
cc1 = 5.0000
cc2 = 0.8000
cc3 = 1.9680
cc4 = 1.1360
cc5 = 0.0000
cc6 = 0.4000
cb1 = 5.9500
cb2 = 0.6000
cb3 = 1.0000
cb4 = 0.0000
cb5 = 0.3333
cbb = 0.7200
name = 'Canuto A'
class GOTMStabilityFunctionCanutoB(GOTMStabilityFunctionBase):
"""
Canuto et al. (2001) version B stability functions
Parameters are from Umlauf and Burchard (2005), Table 1
"""
cc1 = 5.0000
cc2 = 0.6983
cc3 = 1.9664
cc4 = 1.0940
cc5 = 0.0000
cc6 = 0.4950
cb1 = 5.6000
cb2 = 0.6000
cb3 = 1.0000
cb4 = 0.0000
cb5 = 0.3333
cbb = 0.4770
name = 'Canuto B'
class GOTMStabilityFunctionKanthaClayson(GOTMStabilityFunctionBase):
"""
Kantha and Clayson (1994) quasi-equilibrium stability functions
Parameters are from Umlauf and Burchard (2005), Table 1
"""
cc1 = 6.0000
cc2 = 0.3200
cc3 = 0.0000
cc4 = 0.0000
cc5 = 0.0000
cc6 = 0.0000
cb1 = 3.7280
cb2 = 0.7000
cb3 = 0.7000
cb4 = 0.0000
cb5 = 0.2000
cbb = 0.6102
name = 'Kantha Clayson'
class GOTMStabilityFunctionCheng(GOTMStabilityFunctionBase):
"""
Cheng et al. (2002) quasi-equilibrium stability functions
Parameters are from Umlauf and Burchard (2005), Table 1
"""
cc1 = 5.0000
cc2 = 0.7983
cc3 = 1.9680
cc4 = 1.1360
cc5 = 0.0000
cc6 = 0.5000
cb1 = 5.5200
cb2 = 0.2134
cb3 = 0.3570
cb4 = 0.0000
cb5 = 0.3333
cbb = 0.8200
name = 'Cheng'
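# Minimal usage sketch (illustrative): evaluate one of the stability function
# pairs on uniform dimensional inputs. The numbers are placeholders.
#
#   sf = StabilityFunctionCanutoA()
#   m2 = np.full(8, 1.0e-5)    # shear frequency squared M^2 [s-2]
#   n2 = np.full(8, 1.0e-5)    # buoyancy frequency squared N^2 [s-2]
#   k = np.full(8, 1.0e-4)     # turbulent kinetic energy [m2 s-2]
#   eps = np.full(8, 1.0e-7)   # TKE dissipation rate [m2 s-3]
#   s_m, s_rho = sf.evaluate(m2, n2, k, eps)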
# -*- coding: utf-8 -*-
from helper import IcebergUnitTestCase, get_api_handler
from helpers.login_utils import IcebergLoginUtils
class WebhookTestCase(IcebergUnitTestCase):
@classmethod
def setUpClass(cls):
cls.my_context_dict = {}
cls._objects_to_delete = []
cls.api_handler = get_api_handler()
IcebergLoginUtils.direct_login_user_1(handler = cls.api_handler)
# Create an application
application = cls.api_handler.Application()
application.name = "test-webhook-app"
application.contact_user = cls.api_handler.User.me()
application.save()
cls.my_context_dict['application'] = application
cls._objects_to_delete.append(application)
# Create a merchant
merchant = cls.api_handler.Store()
merchant.name = "Test Webhook Merchant"
merchant.application = application
merchant.save()
cls.my_context_dict['merchant'] = merchant
cls._objects_to_delete.append(merchant)
cls.my_context_dict['application_token'] = application.auth_me()
def test_01_create_update_webhook(self):
"""
Create and update a new_merchant_available webhook
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
application = self.my_context_dict['application']
webhook = self.create_webhook(
application=application,
event="new_merchant_available",
url="http://api.iceberg.technology",
)
webhook.url = webhook.get_test_endpoint_url() ## to test update
webhook.save()
self.my_context_dict['webhook_new_merchant'] = webhook
def test_02_trigger_test_webhook(self):
"""
Trigger the test option on the webhook
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
webhook = self.my_context_dict['webhook_new_merchant']
webhook.test_trigger()
webhook_triggers = webhook.wait_for_triggers()
self.assertEquals(len(webhook_triggers), 1)
webhook_trigger = webhook_triggers[0]
self.assertTrue(webhook_trigger.is_test)
self.assertEquals(webhook_trigger.status, "succeeded")
def test_03_trigger_new_merchant_available(self):
"""
Test new_merchant_available triggering when creating a new merchant
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
webhook = self.my_context_dict['webhook_new_merchant']
new_merchant = self.create_merchant(application=self.my_context_dict['application'])
webhook_triggers = webhook.wait_for_triggers(number_of_triggers_expected=2)
self.assertEquals(len(webhook_triggers), 2)
webhook_trigger = webhook_triggers[0]
self.assertFalse(webhook_trigger.is_test)
self.assertEquals(webhook_trigger.status, "succeeded")
webhook_trigger.fetch() ## fetch detail to get payload
self.assertEqual(new_merchant.resource_uri, webhook_trigger.payload.get("resource_uri"))
def test_04_delete_webhook(self):
"""
Delete the webhook
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
webhook = self.my_context_dict['webhook_new_merchant']
webhook.delete()
if webhook in self._objects_to_delete:
## no need to delete it in tearDownClass if delete succeeded
self._objects_to_delete.remove(webhook)
def test_05_create_product_and_webhook(self):
"""
Create and update a product_offer_updated webhook
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
application = self.my_context_dict['application']
webhook_offer = self.create_webhook(
application=application,
event="product_offer_updated",
url="http://api.iceberg.technology",
active_merchant_only=False,
)
webhook_offer.url = webhook_offer.get_test_endpoint_url()
webhook_offer.save() ## update
self.assertEquals(webhook_offer.active_merchant_only, False)
webhook_product = self.create_webhook(
application=application,
event="product_updated",
url="http://api.iceberg.technology",
active_merchant_only=False,
)
webhook_product.url = webhook_product.get_test_endpoint_url()
webhook_product.save() ## update
self.assertEquals(webhook_product.active_merchant_only, False)
self.my_context_dict['webhook_offer_updated'] = webhook_offer
self.my_context_dict['webhook_product_updated'] = webhook_product
def test_06_trigger_product_offer_updated(self):
"""
Test product_offer_updated/product_updated triggering when updating a product_offer
"""
self.direct_login_user_1()
try:
brand = self.api_handler.Brand.find(1)
except Exception:  # Brand 1 may not exist yet; create one
brand = self.api_handler.Brand()
brand.name = "Test Brand"
brand.save()
product = self.create_product(
name = "Test Product",
description = "Description of my test product",
gender = "W",
categories=[50], # blouse ("chemisier")
brand=brand
)
self.my_context_dict['product'] = product
productoffer = self.create_product_offer(
product = self.my_context_dict['product'],
merchant = self.my_context_dict['merchant'],
sku = self.get_random_sku(),
price = "90",
image_paths = ["./tests/static/image_test.JPEG"]
)
self.my_context_dict['offer'] = productoffer
productoffer.activate()
self.assertEquals(productoffer.status, "active")
productoffer.price = 80
productoffer.save()
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
webhook_offer = self.my_context_dict['webhook_offer_updated']
webhook_triggers = webhook_offer.wait_for_triggers(number_of_triggers_expected=2)
self.assertEquals(len(webhook_triggers), 2)
first_webhook_trigger = webhook_triggers[1]
first_webhook_trigger.fetch() ## fetch detail to get payload
second_webhook_trigger = webhook_triggers[0]
second_webhook_trigger.fetch() ## fetch detail to get payload
webhook_attempts = first_webhook_trigger.attempts(response_code__gte=200, response_code__lte=205)
self.assertEquals(len(webhook_attempts), 1)
self.assertEqual(productoffer.resource_uri, first_webhook_trigger.payload.get("resource_uri"))
self.assertEqual(first_webhook_trigger.payload.get("updated_attributes"), [u"status"])
self.assertEqual(first_webhook_trigger.payload.get("status"), u"active")
webhook_attempts = second_webhook_trigger.attempts(response_code__gte=200, response_code__lte=205)
self.assertEquals(len(webhook_attempts), 1)
self.assertEqual(productoffer.resource_uri, second_webhook_trigger.payload.get("resource_uri"))
self.assertEqual(
set(second_webhook_trigger.payload.get("updated_attributes")),
set([u"price", u"price_with_vat", u"price_without_vat"])
)
self.assertEqual(float(second_webhook_trigger.payload.get("price")), 80.00)
webhook_product = self.my_context_dict['webhook_product_updated']
webhook_triggers = webhook_product.wait_for_triggers(number_of_triggers_expected=2)
self.assertEquals(len(webhook_triggers), 2)
webhook_trigger = webhook_triggers[0]
webhook_trigger.fetch() ## fetch detail to get payload
webhook_attempts = webhook_trigger.attempts(response_code__gte=200, response_code__lte=205)
self.assertEquals(len(webhook_attempts), 1)
self.assertEqual(product.resource_uri, webhook_trigger.payload.get("resource_uri"))
self.assertEqual(webhook_trigger.payload.get("updated_attributes"), [u"offers"])
def test_07_trigger_product_offer_updated_2(self):
"""
Test product_offer_updated/product_updated triggering when removing price (status should go to draft)
"""
self.login_user_1()
self.api_handler.access_token = self.my_context_dict['application_token']
webhook_offer = self.my_context_dict['webhook_offer_updated']
webhook_product = self.my_context_dict['webhook_product_updated']
productoffer = self.my_context_dict['offer']
product = self.my_context_dict['product']
productoffer.price = 0
productoffer.save() ## status should go to draft and trigger the webhook
webhook_triggers = webhook_offer.wait_for_triggers(number_of_triggers_expected=3)
self.assertEquals(len(webhook_triggers), 3)
# print "webhook_triggers = %s" % [wt.payload.get("updated_attributes") for wt in webhook_triggers]
webhook_trigger = webhook_triggers[0]
webhook_trigger.fetch() ## fetch detail to get payload
self.assertEqual(productoffer.resource_uri, webhook_trigger.payload.get("resource_uri"))
self.assertEqual(
set(webhook_trigger.payload.get("updated_attributes",[])),
set([u"status", u"price", u"price_with_vat", u"price_without_vat"])
)
self.assertEqual(webhook_trigger.payload.get("status"), u"draft")
self.assertEqual(float(webhook_trigger.payload.get("price")), 0.)
webhook_triggers = webhook_product.wait_for_triggers(number_of_triggers_expected=3)
self.assertEquals(len(webhook_triggers), 3)
# print "webhook_triggers = %s" % [wt.payload.get("updated_attributes") for wt in webhook_triggers]
webhook_trigger = webhook_triggers[0]
webhook_trigger.fetch() ## fetch detail to get payload
self.assertEqual(product.resource_uri, webhook_trigger.payload.get("resource_uri"))
self.assertEqual(webhook_trigger.payload.get("updated_attributes"), [u"offers"])
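# Receiver sketch (illustrative, not part of this suite): a minimal endpoint
# that could stand in for the webhook URLs used above. The route and payload
# handling are assumptions, not the Iceberg test endpoint implementation.
#
#   from flask import Flask, request
#   app = Flask(__name__)
#
#   @app.route("/webhook", methods=["POST"])
#   def receive_webhook():
#       payload = request.get_json(force=True)
#       # payloads above carry e.g. "resource_uri" and "updated_attributes"
#       return "", 200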
import uuid, json
from flask import Blueprint, request, url_for, flash, redirect, make_response
from flask import render_template, abort
from flask_login import login_user, logout_user, current_user
from flask_wtf import Form
from wtforms.fields import TextField, TextAreaField, SelectField, HiddenField, PasswordField
from wtforms import validators, ValidationError
from portality.core import app
import portality.models as models
import portality.util as util
blueprint = Blueprint('account', __name__)
if len(app.config.get('SUPER_USER',[])) > 0:
firstsu = app.config['SUPER_USER'][0]
if models.Account.pull(firstsu) is None:
su = models.Account(id=firstsu)
su.set_password(firstsu)
su.save()
print 'superuser account named - ' + firstsu + ' created.'
print 'default password matches username. Change it.'
@blueprint.route('/')
def index():
if current_user.is_anonymous():
abort(401)
users = models.Account.query()  # {"sort":{'id':{'order':'asc'}}}, size=1000000
accs = []
if users['hits']['total'] != 0:
    accs = [models.Account.pull(i['_source']['id']) for i in users['hits']['hits']]
# explicitly mapped to ensure no leakage of sensitive data. augment as necessary
users = []
for acc in accs:
    user = {'id': acc.id}
    if 'created_date' in acc.data:
        user['created_date'] = acc.data['created_date']
    users.append(user)
if util.request_wants_json():
resp = make_response( json.dumps(users, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('account/users.html', users=users)
@blueprint.route('/<username>', methods=['GET','POST', 'DELETE'])
def username(username):
acc = models.Account.pull(username)
if acc is None:
abort(404)
elif ( request.method == 'DELETE' or
( request.method == 'POST' and
request.values.get('submit',False) == 'Delete' ) ):
if current_user.id != acc.id and not current_user.is_super:
abort(401)
else:
acc.delete()
flash('Account ' + acc.id + ' deleted')
return redirect(url_for('.index'))
elif request.method == 'POST':
if current_user.id != acc.id and not current_user.is_super:
abort(401)
newdata = request.json if request.json else request.values
if newdata.get('id',False):
if newdata['id'] != username:
acc = models.Account.pull(newdata['id'])
else:
newdata['api_key'] = acc.data['api_key']
for k, v in newdata.items():
if k not in ['submit','password']:
acc.data[k] = v
if 'password' in newdata and not newdata['password'].startswith('sha1'):
acc.set_password(newdata['password'])
acc.save()
flash("Record updated")
return render_template('account/view.html', account=acc)
else:
if util.request_wants_json():
resp = make_response(
json.dumps(acc.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('account/view.html', account=acc)
def get_redirect_target():
    for target in request.args.get('next'), request.referrer:
        if not target:
            continue
        # assuming util.is_safe_url returns a boolean, as in the canonical
        # Flask redirect-safety snippet
        if util.is_safe_url(target):
            return target
class RedirectForm(Form):
next = HiddenField()
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
if not self.next.data:
self.next.data = get_redirect_target() or ''
def redirect(self, endpoint='index', **values):
    # assuming util.is_safe_url returns a boolean
    if self.next.data and util.is_safe_url(self.next.data):
        return redirect(self.next.data)
    target = get_redirect_target()
    return redirect(target or url_for(endpoint, **values))
class LoginForm(RedirectForm):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form, csrf_enabled=False)
if request.method == 'POST' and form.validate():
password = form.password.data
username = form.username.data
user = models.Account.pull(username)
if user is None:
user = models.Account.pull_by_email(username)
if user is not None and user.check_password(password):
login_user(user, remember=True)
flash('Welcome back.', 'success')
return form.redirect('index')
else:
flash('Incorrect username/password', 'error')
if request.method == 'POST' and not form.validate():
flash('Invalid form', 'error')
return render_template('account/login.html', form=form)
@blueprint.route('/forgot', methods=['GET', 'POST'])
def forgot():
if request.method == 'POST':
un = request.form.get('un',"")
account = models.Account.pull(un)
if account is None: account = models.Account.pull_by_email(un)
if account is None:
flash('Sorry, your account username / email address is not recognised. Please contact us.')
else:
newpass = util.generate_password()
account.set_password(newpass)
account.save()
to = [account.data['email'],app.config['ADMIN_EMAIL']]
fro = app.config['ADMIN_EMAIL']
subject = app.config.get("SERVICE_NAME","") + " password reset"
text = "A password reset request for account " + account.id + " has been received and processed.\n\n"
text += "The new password for this account is " + newpass + ".\n\n"
text += "If you are the user " + account.id + " and you requested this change, please login now and change the password again to something of your preference.\n\n"
text += "If you are the user " + account.id + " and you did NOT request this change, please contact us immediately.\n\n"
try:
util.send_mail(to=to, fro=fro, subject=subject, text=text)
flash('Your password has been reset. Please check your emails.')
if app.config.get('DEBUG',False):
flash('Debug mode - new password was set to ' + newpass)
except:
flash('Email failed.')
if app.config.get('DEBUG',False):
flash('Debug mode - new password was set to ' + newpass)
return render_template('account/forgot.html')
@blueprint.route('/logout')
def logout():
logout_user()
flash('You are now logged out', 'success')
return redirect('/')
def existscheck(form, field):
    test = models.Account.pull(field.data)
if test:
raise ValidationError('Taken! Please try another.')
class RegisterForm(Form):
w = TextField('Username', [validators.Length(min=3, max=25), existscheck])
n = TextField('Email Address', [
validators.Length(min=3, max=35),
validators.Email(message='Must be a valid email address')
])
s = PasswordField('Password', [
validators.Required(),
validators.EqualTo('c', message='Passwords must match')
])
c = PasswordField('Repeat Password')
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
if not app.config.get('PUBLIC_REGISTER',False) and not current_user.is_super:
abort(401)
form = RegisterForm(request.form, csrf_enabled=False)
if request.method == 'POST' and form.validate():
api_key = str(uuid.uuid4())
account = models.Account(
id=form.w.data,
email=form.n.data,
api_key=api_key
)
account.set_password(form.s.data)
account.save()
flash('Account created for ' + account.id + '. If not listed below, refresh the page to catch up.', 'success')
return redirect('/account')
if request.method == 'POST' and not form.validate():
flash('Please correct the errors', 'error')
return render_template('account/register.html', form=form)
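# Registration sketch (illustrative): mounting this blueprint in the app
# factory. The import path is an assumption based on the module layout.
#
#   from portality.view.account import blueprint as account
#   app.register_blueprint(account, url_prefix='/account')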