repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Mariaanisimova/pythonintask | INBa/2015/Sarocvashin_M/task_12_23.py | 1 | 4811 | #Задача 12. Вариант 28.
#1-50. Разработайте игру "Крестики-нолики". (см. М.Доусон Программируем на Python гл. 6).
#Сароквашин Максим
#29.05.2016
# Board marks and game-wide constants.
X = "X"  # mark of the player who moves first
O = "O"  # mark of the player who moves second
EMPTY = " "  # an unoccupied board square
TIE = "Ничья"  # sentinel returned by winner() for a drawn game ("tie")
NUM_SQUARES = 9  # 3x3 board
def display_instruct():
    """Print the greeting and the 0-8 square-numbering scheme of the board."""
    print('''
Добро пожаловать на ринг грандиознейших интеллектуальных состязаний всех времён.
Твой мозг и мой процессор сойдутся в схватке за доской игры "Крестики-нолики".
Чтобы сделать ход, введи число от 0 до 8. Числа однозначно соответствуют полям
доски - так, как показано ниже:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8''')
def ask_yes_no(question):
    """Ask a yes/no question; re-prompt until the reply is 'y' or 'n'."""
    while True:
        reply = input(question).lower()
        if reply in ("y", "n"):
            return reply
def ask_number(question, low, high):
    """Prompt until the user enters an integer in [low, high); return it.

    Bug fix: the original passed the raw reply to int(), so any
    non-numeric input raised an uncaught ValueError and crashed the
    game.  Junk input is now rejected with a silent re-prompt.
    """
    response = None
    while response not in range(low, high):
        try:
            response = int(input(question))
        except ValueError:
            continue  # non-numeric input: ask again
    return response
def pieces():
    """Ask who moves first and assign marks; the first mover always plays X.

    Returns the tuple (computer, human) of board marks.
    """
    go_first = ask_yes_no("Хочешь оставить за собой первый ход? (y/n): ")
    if go_first == "y":
        print("\nНу что ж, даю тебе фору: играй крестиками.")
        human = X
        computer = O
    else:
        print("\nТвоя удаль тебя погубит... Буду начинать я.")
        computer = X
        human = O
    return computer, human
def new_board():
    """Create and return a fresh board of NUM_SQUARES empty squares."""
    return [EMPTY for _ in range(NUM_SQUARES)]
def display_board(board):
    """Print *board* (a 9-element list of marks) as a 3x3 grid."""
    print("\n\t", board[0], "|", board[1], "|", board[2])
    print("\t", "---------")
    print("\t", board[3], "|", board[4], "|", board[5])
    print("\t", "---------")
    print("\t", board[6], "|", board[7], "|", board[8])
def legal_moves(board):
    """Return the indices of all empty squares on *board*, in order.

    Generalized: the original iterated the hard-coded NUM_SQUARES, so it
    failed on any other board size; using enumerate supports boards of
    arbitrary length while producing identical output for the standard
    9-square board.
    """
    return [square for square, mark in enumerate(board) if mark == EMPTY]
def winner(board):
    """Return the winning mark, TIE when the board is full, else None."""
    WAYS_TO_WIN = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                   (0, 3, 6), (1, 4, 7), (2, 5, 8),
                   (0, 4, 8), (2, 4, 6))
    # Check every row, column and diagonal for three identical marks.
    for a, b, c in WAYS_TO_WIN:
        if board[a] != EMPTY and board[a] == board[b] == board[c]:
            return board[a]
    # No line of three: a full board is a draw, otherwise play continues.
    if EMPTY not in board:
        return TIE
    return None
def human_move(board, human):
    """Keep asking the human for a square until they pick a free one.

    Returns the chosen square index (0-8).
    """
    legal = legal_moves(board)
    move = None
    while move not in legal:
        move = ask_number("Твой ход. Выбери одно из полей (0-8):", 0, NUM_SQUARES)
        if move not in legal:
            print("\nСмешной человек! Это поле уже занято. Выбери другое.\n")
    print("Ладно...")
    return move
def computer_move(board, computer, human):
    """Choose and return the computer's move.

    Strategy, in priority order:
      1. take a square that wins immediately;
      2. block a square where the human would win next turn;
      3. take the best free square (center, then corners, then edges).
    """
    board = board[:]  # probe on a copy so trial moves never touch the real board
    BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
    print("Я выберу поле номер", end=" ")
    # 1) immediate win for the computer?
    for move in legal_moves(board):
        board[move] = computer
        if winner(board) == computer:
            print(move)
            return move
        board[move] = EMPTY  # undo the probe
    # 2) immediate win for the human? block it.
    for move in legal_moves(board):
        board[move] = human
        if winner(board) == human:
            print(move)
            return move
        board[move] = EMPTY  # undo the probe
    # 3) otherwise the best square still free.
    for move in BEST_MOVES:
        if move in legal_moves(board):
            print(move)
            return move
def next_turn(turn):
    """Return the mark that moves after *turn* (X and O alternate)."""
    return O if turn == X else X
def congrat_winner(the_winner, computer, human):
    """Announce the result and print a themed message for each outcome.

    the_winner is a mark ("X"/"O") or the TIE sentinel from winner().
    """
    if the_winner != TIE:
        print("Три", the_winner, "в ряд!\n")
    else:
        print("Ничья!\n")
    if the_winner == computer:
        print("Kaк я и предсказывал. победа в очередной раз осталась за мной.\nВот еще один довод в пользу того. что компьютеры превосходят людей решительно во всем.")
    elif the_winner == human:
        print("О нет, этого не может быть! Неужели ты как-то сумел перехитрить меня, белковый?\nКлянусь: я, компьютер, не допущу этого больше никогда!")
    elif the_winner == TIE:
        print("Тебе несказанно повезло, дружок: ты сумел свести игру вничью.\nРадуйся же сегодняшнему успеху! Завтра уже не суждено его повторить.")
def main():
    """Play one full game: X always moves first; alternate until decided."""
    display_instruct()
    computer, human = pieces()
    turn = X
    board = new_board()
    display_board(board)
    # Loop until winner() reports a mark or a tie (both are truthy).
    while not winner(board):
        if turn == human:
            move = human_move(board, human)
            board[move] = human
        else:
            move = computer_move(board, computer, human)
            board[move] = computer
        display_board(board)
        turn = next_turn(turn)
    the_winner = winner(board)
    congrat_winner(the_winner, computer, human)
# Run the game only when executed as a script; previously the game started
# (and blocked on input) as a side effect of merely importing this module.
if __name__ == "__main__":
    main()
    input("\n\nНажмите Enter, чтобы выйти.")
| apache-2.0 |
madafoo/cjdns | node_build/dependencies/libuv/build/gyp/test/defines/gyptest-define-override.py | 239 | 1476 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a default gyp define can be overridden.
"""
import os
import TestGyp
# Regression test: a default gyp define ("OS") must be overridable both on
# the command line (-D) and via the GYP_DEFINES environment variable.
test = TestGyp.TestGyp()

# CMake loudly warns about passing '#' to the compiler and drops the define.
expect_stderr = ''
if test.format == 'cmake':
    expect_stderr = (
        """WARNING: Preprocessor definitions containing '#' may not be passed on the"""
        """ compiler command line because many compilers do not support it.\n"""
        """CMake is dropping a preprocessor definition: HASH_VALUE="a#1"\n"""
        """Consider defining the macro in a (configured) header file.\n\n""")

# Command-line define
test.run_gyp('defines.gyp', '-D', 'OS=fakeos')
test.build('defines.gyp', stderr=expect_stderr)
test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE)
# Clean up the exe so subsequent tests don't find an old exe.
os.remove(test.built_file_path('fakeosprogram', type=test.EXECUTABLE))

# Without "OS" override, fakeosprogram shouldn't be built.
test.run_gyp('defines.gyp')
test.build('defines.gyp', stderr=expect_stderr)
test.built_file_must_not_exist('fakeosprogram', type=test.EXECUTABLE)

# Environment define
os.environ['GYP_DEFINES'] = 'OS=fakeos'
test.run_gyp('defines.gyp')
test.build('defines.gyp', stderr=expect_stderr)
test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE)

test.pass_test()
| gpl-3.0 |
umanium/trafficmon | id/trafficmon/TrafficMain.py | 1 | 4091 | import os
import cv2
import numpy as np
import time
from backgroundsubtraction.KMeans import KMeans
from objectblob.ObjectBlobDetection import ObjectBlobDetection
from pixelcleaning.MorphologicalCleaning import MorphologicalCleaning
__author__ = 'Luqman'
def morphological(image):
    """Factory for the morphological pixel-cleaning model.

    *image* is accepted only to satisfy the algorithm-callback signature
    used by test(); it is not consulted.
    """
    return MorphologicalCleaning()
def test(algorithm, vid_src, file_name):
    """Run background subtraction + blob detection over a video stream.

    algorithm: factory that receives the first grayscale frame and returns
        a pixel-cleaning model (e.g. morphological()).
    vid_src: an opened cv2.VideoCapture.
    file_name: experiment name used for the saved_images/ sub-folders.

    Shows three live windows ('img', 'fg', 'fg_clean'); press 'q' to stop.
    Prints FPS statistics after the stream ends.  NOTE: Python 2 source
    (print statements at the bottom).
    """
    _, frame = vid_src.read()
    used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    model = KMeans(used_frame, 3)  # K-means background model
    cleaning_model = algorithm(used_frame)
    blob_detection = ObjectBlobDetection(used_frame)
    n_frame = 0
    image_resolution = (0, 0)
    # FPS statistics; -1 marks "not initialised yet"
    min_fps = -1
    max_fps = -1
    mean_fps = -1
    real_fps = vid_src.get(cv2.cv.CV_CAP_PROP_FPS)
    # vid_src.get(cv2.CV_CAP_PROP_FPS)
    # Create the image-dump folder tree on first use.
    if not os.path.exists("saved_images/"+file_name):
        os.makedirs("saved_images/"+file_name)
        os.makedirs("saved_images/"+file_name+"/normal")
        os.makedirs("saved_images/"+file_name+"/fg")
        os.makedirs("saved_images/"+file_name+"/grayscale")
        os.makedirs("saved_images/"+file_name+"/clean")
        os.makedirs("saved_images/"+file_name+"/contour")
    # applying background detection
    while frame is not None:
        time_start = time.time()
        n_frame += 1
        # for explanational purpose
        # ambil gambar (Indonesian: "grab the image")
        # if n_frame % 30 == 0:
        #     cv2.imwrite("saved_images/"+file_name+"/normal/"+repr(n_frame)+".jpg", frame)
        used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        y, x = used_frame.shape
        image_resolution = x, y
        fg = model.apply(used_frame)  # raw foreground mask
        # for explanational purpose
        # ambil gambar
        # if n_frame % 30 == 0:
        #     cv2.imwrite("saved_images/"+file_name+"/fg/"+repr(n_frame)+".jpg", fg)
        #     cv2.imwrite("saved_images/"+file_name+"/grayscale/"+repr(n_frame)+".jpg", used_frame)
        fg_use = np.copy(fg)
        fg_clean = cleaning_model.apply(fg)  # denoised foreground mask
        fg_clean_use = np.copy(fg_clean)
        # for explanational purpose
        # ambil gambar
        # if n_frame % 30 == 0:
        #     cv2.imwrite("saved_images/"+file_name+"/clean/"+repr(n_frame)+".jpg", fg_clean)
        # contours
        blob_detection.get_contours(fg_clean_use, used_frame)
        # cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
        frame_with_contours = blob_detection.draw_blobs(frame)
        # print len(contours)
        # for explanational purpose
        # ambil gambar
        # if n_frame % 30 == 0:
        #     cv2.imwrite("saved_images/"+file_name+"/contour/"+repr(n_frame)+".jpg", frame_with_contours)
        time_end = time.time()
        cv2.imshow('img', frame_with_contours)
        cv2.imshow('fg', fg)
        cv2.imshow('fg_clean', fg_clean)
        # prev_frame = np.copy(frame)
        _, frame = vid_src.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time_process = time_end - time_start
        cur_fps = 0
        if time_process > 0:
            cur_fps = 1. / time_process
        # set max / min / mean fps
        if (cur_fps > max_fps) or (max_fps == -1):
            max_fps = cur_fps
        if (cur_fps < min_fps) or (min_fps == -1):
            min_fps = cur_fps
        if mean_fps == -1:
            mean_fps = cur_fps
        else:
            # exponential moving average of the per-frame FPS
            mean_fps = (0.98 * mean_fps) + (0.02 * cur_fps)
    print "--- run statistics ---"
    print "image resolution: ", image_resolution
    print "total frame: ", n_frame
    print "min FPS: ", min_fps
    print "max FPS: ", max_fps
    print "average FPS: ", mean_fps
    print "Video FPS: ", real_fps
if __name__ == '__main__':
    import sys
    # CLI: TrafficMain.py [video_source [experiment_name]]
    # With no arguments, capture from webcam 0 under the name "default".
    if len(sys.argv) >= 2:
        video_src_file = sys.argv[1]
        if len(sys.argv) >= 3:
            exp_file_name = sys.argv[2]
        else:
            exp_file_name = "default"
    else:
        video_src_file = 0
        exp_file_name = "default"
    # run video
    vid = cv2.VideoCapture(video_src_file)
    test(morphological, vid, exp_file_name)
| mit |
slightperturbation/Cobalt | ext/emsdk_portable/clang/tag-e1.34.1/src/test/CodeGen/SystemZ/Large/branch-range-04.py | 9 | 3694 | # Test 64-bit COMPARE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 1(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 2(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 3(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 4(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# ...as mentioned above, the next one could be a CGRJE instead...
# CHECK: lgb [[REG:%r[0-5]]], 5(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 6(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 7(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lgb [[REG:%r[0-5]]], 25(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 26(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 27(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 28(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 29(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 30(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 31(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 32(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# NOTE: Python 2 script (print statements, xrange); emits LLVM IR on stdout.
# Number of conditional-branch blocks generated before and after "main".
branch_blocks = 8
# Byte size of the "main" block, chosen so some branches go out of range.
main_size = 0xffcc
print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
# Blocks before "main": each loads a byte, compares, and branches to after0.
for i in xrange(branch_blocks):
    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
    print 'before%d:' % i
    print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
    print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
    print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
    print ' %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i)
    print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
    print ''
print '%s:' % next
# Fill "main" with 0xffcc bytes of stores; the Fibonacci-style recurrence
# just produces varied (offset, value) pairs for the MVIY instructions.
a, b = 1, 1
for i in xrange(0, main_size, 6):
    a, b = b, a + b
    offset = 4096 + b % 500000
    value = a % 256
    print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
    print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
# Blocks after "main": each compares a loaded byte and branches back to main.
for i in xrange(branch_blocks):
    print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
    print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
    print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
    print ' %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i)
    print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
    print ''
    print 'after%d:' % i
print ' ret void'
print '}'
| apache-2.0 |
jvasilakes/ML_Programs | naive_bayes.py | 1 | 4044 | #! /usr/bin/python2
from __future__ import division
import numpy as np
from sys import exit
from collections import OrderedDict
# ------Training data-------------
# Each row is one training document as a binary bag-of-words vector
# (8 attributes, value 1 = word present, 0 = absent).
# Politics
xP = [
    [1, 0, 1, 1, 1, 0, 1, 1],
    [0, 0, 0, 1, 0, 0, 1, 1],
    [1, 0, 0, 1, 1, 0, 1, 0],
    [0, 1, 0, 0, 1, 1, 0, 1],
    [0, 0, 0, 1, 1, 0, 1, 1],
    [0, 0, 0, 1, 1, 0, 0, 1]
]
# Sport
xS = [
    [1, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 0, 1, 0, 0, 0, 0],
    [1, 1, 0, 1, 0, 0, 0, 0],
    [1, 1, 0, 1, 1, 0, 0, 0],
    [0, 0, 0, 1, 0, 1, 0, 0],
    [1, 1, 1, 1, 1, 0, 1, 0]
]
# ---------------------------------
# --------Test data----------------
# The attribute vector to classify.
test_data = [1, 1, 1, 0, 1, 1, 0, 0]
# ---------------------------------
def main(xP, xS, test_data):
    """Classify *test_data* as "Politics" or "Sport" with naive Bayes.

    The parameters shadow the module-level xP/xS/test_data constants.
    """
    # Create attribute counts table for each class in training data
    xP_attribute_counts = count_values(xP)
    xS_attribute_counts = count_values(xS)
    total_num_instances = len(xP) + len(xS)
    # Priors P(class)
    xP_prior_prob = calc_prior_probability(xP, total_num_instances)
    xS_prior_prob = calc_prior_probability(xS, total_num_instances)
    # Likelihoods P(test_data | class)
    likelihood_xP = calc_likelihood(xP, xP_attribute_counts, test_data)
    likelihood_xS = calc_likelihood(xS, xS_attribute_counts, test_data)
    # Evidence P(test_data), shared denominator of both posteriors
    total_attribute_probability = calc_attrib_prob(xP_attribute_counts,
                                                   xS_attribute_counts,
                                                   total_num_instances,
                                                   test_data)
    xP_prob = calc_bayes_prob(xP_prior_prob,
                              likelihood_xP,
                              total_attribute_probability)
    xS_prob = calc_bayes_prob(xS_prior_prob,
                              likelihood_xS,
                              total_attribute_probability)
    # Pick the class with the larger posterior (ties go to Politics).
    if max(xP_prob, xS_prob) == xP_prob:
        return "Politics"
    else:
        return "Sport"
def calc_prior_probability(clazz, total_num_instances):
    """Return the prior P(class): the fraction of all training
    instances that belong to *clazz*."""
    return len(clazz) / total_num_instances
def calc_likelihood(clazz, attribute_counts, test_data):
    """Return the product of per-attribute likelihoods for *test_data*.

    attribute_counts[idx][v] is how often value v occurs for attribute
    idx inside *clazz*; each factor is that count over the class size.
    """
    class_size = len(clazz)
    factors = [attribute_counts[idx][value] / class_size
               for idx, value in enumerate(test_data)]
    return np.prod(factors)
def calc_attrib_prob(xP_counts, xS_counts, total_instances, test_data):
    """Return the total probability (evidence) of the test vector:
    per attribute, the pooled count of its value across both classes
    divided by the total number of training instances, all multiplied."""
    probs = [
        (xP_counts[idx][test_data[idx]] + xS_counts[idx][test_data[idx]])
        / total_instances
        for idx in range(len(xP_counts))
    ]
    return np.prod(probs)
def calc_bayes_prob(prior_prob, likelihood, total_attrib_prob):
    """Bayes' rule: posterior = prior * likelihood / evidence."""
    numerator = prior_prob * likelihood
    return numerator / total_attrib_prob
def count_values(attribute_vector):
    """Tally 0/1 value counts for every attribute position.

    Returns an OrderedDict mapping attribute index -> [count_of_0,
    count_of_1].  E.g. count_values(xP)[3][1] is how many instances of
    xP have value 1 at attribute index 3.

    Raises ValueError on a value other than 0 or 1.  (The original used
    a Python 2 `print` statement — a syntax error under Python 3 — and
    killed the whole process with exit(1) from inside a library helper.)
    """
    # Initialize a [zeros, ones] pair for each attribute position,
    # keyed by index in order.
    counts = OrderedDict.fromkeys(range(len(attribute_vector[0])))
    for key in counts:
        counts[key] = [0, 0]
    # Count values
    for instance in attribute_vector:
        for i, value in enumerate(instance):
            if value not in (0, 1):
                raise ValueError("invalid attribute value: %r" % (value,))
            counts[i][value] += 1
    return counts
if __name__ == "__main__":
    # Classify the module-level test vector when run as a script.
    # NOTE: Python 2 print statement.
    print main(xP, xS, test_data)
| unlicense |
achang97/YouTunes | lib/python2.7/site-packages/pyasn1_modules/rfc3281.py | 12 | 9860 | # coding: utf-8
#
# This file is part of pyasn1-modules software.
#
# Created by Stanisław Pitucha with asn1ate tool.
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# An Internet Attribute Certificate Profile for Authorization
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc3281.txt
#
from pyasn1.type import univ
from pyasn1.type import char
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import constraint
from pyasn1.type import useful
from pyasn1_modules import rfc3280
MAX = float('inf')  # stand-in for the ASN.1 "MAX" upper bound in constraints
def _buildOid(*components):
    """Flatten *components* (ints and/or ObjectIdentifiers) into one
    univ.ObjectIdentifier whose arcs are concatenated in order."""
    arcs = []
    for component in components:
        if isinstance(component, univ.ObjectIdentifier):
            arcs.extend(list(component))
        else:
            arcs.append(int(component))
    return univ.ObjectIdentifier(arcs)
class ObjectDigestInfo(univ.Sequence):
    # RFC 3281: digest of a public key, certificate, or other object,
    # used to identify a holder/issuer without naming it directly.
    pass


ObjectDigestInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('digestedObjectType', univ.Enumerated(
        namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
    namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
    namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
    namedtype.NamedType('objectDigest', univ.BitString())
)


class IssuerSerial(univ.Sequence):
    # Identifies a certificate by issuer name plus serial number.
    pass


IssuerSerial.componentType = namedtype.NamedTypes(
    namedtype.NamedType('issuer', rfc3280.GeneralNames()),
    namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
    namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
)


class TargetCert(univ.Sequence):
    # A target expressed as a specific certificate, optionally with a
    # name and/or object digest.
    pass


TargetCert.componentType = namedtype.NamedTypes(
    namedtype.NamedType('targetCertificate', IssuerSerial()),
    namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
    namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
)


class Target(univ.Choice):
    # One entry of the target-information extension: a name, a group,
    # or a certificate (context tags 0/1/2).
    pass


Target.componentType = namedtype.NamedTypes(
    namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('targetCert',
                        TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)


class Targets(univ.SequenceOf):
    # SEQUENCE OF Target.
    pass


Targets.componentType = Target()


class ProxyInfo(univ.SequenceOf):
    # SEQUENCE OF Targets; payload of the proxying extension.
    pass


ProxyInfo.componentType = Targets()
# Object identifiers defined or referenced by RFC 3281.
id_at_role = _buildOid(rfc3280.id_at, 72)

id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)

id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)

id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)


class ClassList(univ.BitString):
    # Security-classification bit flags for the clearance attribute.
    pass


ClassList.namedValues = namedval.NamedValues(
    ('unmarked', 0),
    ('unclassified', 1),
    ('restricted', 2),
    ('confidential', 3),
    ('secret', 4),
    ('topSecret', 5)
)


class SecurityCategory(univ.Sequence):
    # Open-typed (OID, ANY value) security-category pair.
    pass


SecurityCategory.componentType = namedtype.NamedTypes(
    namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)


class Clearance(univ.Sequence):
    # Clearance attribute: policy OID, classification list (defaults to
    # "unclassified"), and optional security categories.
    pass


Clearance.componentType = namedtype.NamedTypes(
    namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.DefaultedNamedType('classList',
                                 ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
                                                                         tag.tagFormatSimple, 1)).subtype(
                                     value="unclassified")),
    namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)


class AttCertVersion(univ.Integer):
    # Attribute-certificate version; RFC 3281 defines only v2 (value 1).
    pass


AttCertVersion.namedValues = namedval.NamedValues(
    ('v2', 1)
)


id_aca = _buildOid(rfc3280.id_pkix, 10)

id_at_clearance = _buildOid(2, 5, 1, 5, 55)
class AttrSpec(univ.SequenceOf):
    # SEQUENCE OF OBJECT IDENTIFIER: attribute types listed in AAControls.
    pass


AttrSpec.componentType = univ.ObjectIdentifier()


class AAControls(univ.Sequence):
    # Constraints on which attributes an Attribute Authority may issue.
    pass


AAControls.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('pathLenConstraint',
                                univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
    namedtype.OptionalNamedType('permittedAttrs',
                                AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('excludedAttrs',
                                AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
)


class AttCertValidityPeriod(univ.Sequence):
    # Validity window of an attribute certificate (GeneralizedTime pair).
    pass


AttCertValidityPeriod.componentType = namedtype.NamedTypes(
    namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
    namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
)


id_aca_authenticationInfo = _buildOid(id_aca, 1)


class V2Form(univ.Sequence):
    # v2 issuer form: issuer name and/or base certificate ID and/or digest.
    pass


V2Form.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
    namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)


class AttCertIssuer(univ.Choice):
    # Issuer of the attribute certificate (v1 names or the v2 form).
    pass


AttCertIssuer.componentType = namedtype.NamedTypes(
    namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
    namedtype.NamedType('v2Form',
                        V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)


class Holder(univ.Sequence):
    # The entity the attribute certificate makes claims about.
    pass


Holder.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class AttributeCertificateInfo(univ.Sequence):
    # The to-be-signed body of an attribute certificate.
    pass


AttributeCertificateInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('version', AttCertVersion()),
    namedtype.NamedType('holder', Holder()),
    namedtype.NamedType('issuer', AttCertIssuer()),
    namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
    namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
    namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
    namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
    namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
    namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
)


class AttributeCertificate(univ.Sequence):
    # Complete attribute certificate: body + signature algorithm + signature.
    pass


AttributeCertificate.componentType = namedtype.NamedTypes(
    namedtype.NamedType('acinfo', AttributeCertificateInfo()),
    namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
    namedtype.NamedType('signatureValue', univ.BitString())
)


id_mod = _buildOid(rfc3280.id_pkix, 0)

id_mod_attribute_cert = _buildOid(id_mod, 12)

id_aca_accessIdentity = _buildOid(id_aca, 2)


class RoleSyntax(univ.Sequence):
    # Role attribute: optional role authority plus a role name.
    pass


RoleSyntax.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('roleName',
                        rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)


id_aca_chargingIdentity = _buildOid(id_aca, 3)


class ACClearAttrs(univ.Sequence):
    # Attributes of an AC, keyed by its issuer and serial number.
    pass


ACClearAttrs.componentType = namedtype.NamedTypes(
    namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
    namedtype.NamedType('acSerial', univ.Integer()),
    namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
)


id_aca_group = _buildOid(id_aca, 4)

id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)


class SvceAuthInfo(univ.Sequence):
    # Service authentication info: service name, identity, optional secret.
    pass


SvceAuthInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('service', rfc3280.GeneralName()),
    namedtype.NamedType('ident', rfc3280.GeneralName()),
    namedtype.OptionalNamedType('authInfo', univ.OctetString())
)


class IetfAttrSyntax(univ.Sequence):
    # Generic IETF attribute: optional policy authority and a list of
    # octet-string / OID / UTF-8-string values.
    pass


IetfAttrSyntax.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType(
        'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
    ),
    namedtype.NamedType(
        'values', univ.SequenceOf(
            componentType=univ.Choice(
                componentType=namedtype.NamedTypes(
                    namedtype.NamedType('octets', univ.OctetString()),
                    namedtype.NamedType('oid', univ.ObjectIdentifier()),
                    namedtype.NamedType('string', char.UTF8String())
                )
            )
        )
    )
)


id_aca_encAttrs = _buildOid(id_aca, 6)
| mit |
bzhou26/leetcode_sol | p384_Shuffle_an_Array.py | 1 | 1759 | '''
- Leetcode problem: 384
- Difficulty: Medium
- Brief problem description:
Shuffle a set of numbers without duplicates.
Example:
// Init an array with set 1, 2, and 3.
int[] nums = {1,2,3};
Solution solution = new Solution(nums);
// Shuffle the array [1,2,3] and return its result. Any permutation of [1,2,3] must equally likely to be returned.
solution.shuffle();
// Resets the array back to its original configuration [1,2,3].
solution.reset();
// Returns the random shuffling of array [1,2,3].
solution.shuffle();
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
class Solution:
    """Shuffle an array uniformly and restore its original order (LeetCode 384)."""

    def __init__(self, nums: List[int]):
        self.origin = nums[:]  # pristine copy used by reset()
        self.arr = nums        # working array returned by shuffle()/reset()

    def reset(self) -> List[int]:
        """
        Resets the array to its original configuration and return it.
        """
        # Bug fix: the original returned self.origin but left self.arr
        # shuffled, so the next shuffle() started from a shuffled state and
        # callers could mutate the internal pristine copy through the
        # returned reference.  Restore the working array from a fresh copy.
        self.arr = self.origin[:]
        return self.arr

    def shuffle(self) -> List[int]:
        """
        Returns a random shuffling of the array.
        """
        # Fisher-Yates: every permutation is equally likely.  The original
        # ad-hoc distance/direction swapping did not sample permutations
        # uniformly.
        for i in range(len(self.arr) - 1, 0, -1):
            j = random.randint(0, i)
            self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
        return self.arr


# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle() | mit |
lucywyman/slides-ii | v/lib/python2.7/site-packages/jinja2/__init__.py | 303 | 2326 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'  # docstring markup used across Jinja2
__version__ = '2.8'  # package release version
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
# Public API of the jinja2 package, re-exported from the submodules above.
__all__ = [
    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
    'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
    'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
    'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
    'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
]
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_trace_result.py | 1 | 3361 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
    """A trace result.

    All required parameters must be populated in order to send to Azure.

    :param id: The unique ID for this event.
    :type id: str
    :param count: Count of the event
    :type count: long
    :param timestamp: Timestamp of the event
    :type timestamp: datetime
    :param custom_dimensions: Custom dimensions of the event
    :type custom_dimensions:
     ~azure.applicationinsights.models.EventsResultDataCustomDimensions
    :param custom_measurements: Custom measurements of the event
    :type custom_measurements:
     ~azure.applicationinsights.models.EventsResultDataCustomMeasurements
    :param operation: Operation info of the event
    :type operation: ~azure.applicationinsights.models.EventsOperationInfo
    :param session: Session info of the event
    :type session: ~azure.applicationinsights.models.EventsSessionInfo
    :param user: User info of the event
    :type user: ~azure.applicationinsights.models.EventsUserInfo
    :param cloud: Cloud info of the event
    :type cloud: ~azure.applicationinsights.models.EventsCloudInfo
    :param ai: AI info of the event
    :type ai: ~azure.applicationinsights.models.EventsAiInfo
    :param application: Application info of the event
    :type application: ~azure.applicationinsights.models.EventsApplicationInfo
    :param client: Client info of the event
    :type client: ~azure.applicationinsights.models.EventsClientInfo
    :param type: Required. Constant filled by server.
    :type type: str
    :param trace:
    :type trace: ~azure.applicationinsights.models.EventsTraceInfo
    """

    # Fields the service requires; 'type' is the polymorphic discriminator.
    _validation = {
        'type': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type names.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'count': {'key': 'count', 'type': 'long'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
        'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
        'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
        'session': {'key': 'session', 'type': 'EventsSessionInfo'},
        'user': {'key': 'user', 'type': 'EventsUserInfo'},
        'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
        'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
        'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
        'client': {'key': 'client', 'type': 'EventsClientInfo'},
        'type': {'key': 'type', 'type': 'str'},
        'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
    }

    def __init__(self, **kwargs):
        # Base class populates the shared event fields; then set the trace
        # payload and pin the discriminator for this subtype.
        super(EventsTraceResult, self).__init__(**kwargs)
        self.trace = kwargs.get('trace', None)
        self.type = 'trace'
| mit |
837468220/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_varsize_struct.py | 277 | 1842 | from ctypes import *
import unittest
class VarSizeTest(unittest.TestCase):
    """Tests for ctypes.resize() on structures with a trailing flexible array."""

    def _grow_and_check(self, inst, extra_ints):
        """Resize *inst* to hold *extra_ints* more c_ints and verify its contents survive."""
        target = sizeof(type(inst)) + sizeof(c_int) * extra_ints
        resize(inst, target)
        self.assertEqual(sizeof(inst), target)
        self.assertEqual((inst.item, inst.array[0]), (42, 100))

    def test_resize(self):
        class X(Structure):
            _fields_ = [("item", c_int),
                        ("array", c_int * 1)]
        self.assertEqual(sizeof(X), sizeof(c_int) * 2)
        x = X()
        x.item = 42
        x.array[0] = 100
        self.assertEqual(sizeof(x), sizeof(c_int) * 2)
        # Grow by one extra item, then by ten, then shrink back to one extra.
        self._grow_and_check(x, 1)
        self._grow_and_check(x, 9)
        self._grow_and_check(x, 1)

    def test_array_invalid_length(self):
        # Arrays with non-positive length cannot be created.
        for bad_length in (-1, -3):
            self.assertRaises(ValueError, lambda length=bad_length: c_int * length)

    def test_zerosized_array(self):
        empty = (c_int * 0)()
        # Every index -- including negative ones -- is out of range.
        for index in (0, 1, -1):
            self.assertRaises(IndexError, empty.__setitem__, index, None)
            self.assertRaises(IndexError, empty.__getitem__, index)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| apache-2.0 |
Oi-Android/android_kernel_xiaomi_ferrari | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and modifier flags (see <linux/futex.h>).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
# Flags OR'd into the operation code.
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the modifier flags, leaving the bare command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the average of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into one nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Whole-seconds part of a nanosecond timestamp."""
    # Floor division keeps the Python 2 integer-division semantics that the
    # "%u" format in nsecs_str() relies on.
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Sub-second remainder (in nanoseconds) of a nanosecond timestamp."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'sssss.nnnnnnnnn'."""
    # The original ended this expression with a stray trailing comma, which
    # made the function return a 1-tuple instead of the formatted string.
    # It also bound the result to a local named 'str', shadowing the builtin.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold value into the (min, max, smoothed-average, count) tuple at dict[key]."""
    # 'in' replaces dict.has_key(), which no longer exists on Python 3.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, avg, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # NOTE: this is exponential smoothing, not a true arithmetic mean.
        avg = (avg + value) / 2
        dict[key] = (lo, hi, avg, count + 1)
def clear_term():
    """Reset the display: cursor to the home position, then erase the screen."""
    # "\x1b[H" homes the cursor; "\x1b[2J" clears the whole screen (ANSI).
    print("\x1b[H" "\x1b[2J")
audit_package_warned = False

try:
    import audit
    # Map the machine name reported by os.uname() to the audit ELF machine id.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        # Only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except Exception:
    # audit is missing or this machine is unknown; syscall_name() will
    # degrade to printing raw syscall numbers.
    if not audit_package_warned:
        audit_package_warned = True
        # print() call form works on both Python 2 and 3; the original used
        # the Python 2 print statement, which breaks parsing under Python 3.
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    """Map a syscall number to its name via audit; fall back to the raw number."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # 'except Exception' instead of a bare except: still covers the
        # NameError raised when audit/machine_id are unavailable, without
        # swallowing KeyboardInterrupt/SystemExit.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for +/-nr, or a fallback string."""
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare except: only an unknown errno number takes
        # the fallback path.
        return "Unknown %d errno" % nr
| gpl-2.0 |
hassanabidpk/django | django/utils/jslex.py | 251 | 7779 | """JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
    """
    A specification for a token class.

    Each instance draws a unique, monotonically increasing ``id`` from the
    class-level counter; the lexer uses it to build a regex group name.
    """
    num = 0  # class-wide counter handing out unique ids

    def __init__(self, name, regex, next=None):
        # Claim the next unique id and bump the shared counter.
        self.id, Tok.num = Tok.num, Tok.num + 1
        self.name, self.regex, self.next = name, regex, next
def literals(choices, prefix="", suffix=""):
    """
    Create a regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` will be attached to each choice
    individually.
    """
    escaped = (prefix + re.escape(choice) + suffix for choice in choices.split())
    return "|".join(escaped)
class Lexer(object):
    """
    A generic multi-state regex-based lexer.

    ``states`` maps a state name to a list of ``Tok``-like specs; ``first``
    names the start state. Each state is compiled into a single alternation
    regex whose group names encode the token ids.
    """
    def __init__(self, states, first):
        self.regexes = {}
        self.toks = {}
        for state, rules in states.items():
            alternatives = []
            for rule in rules:
                group = "t%d" % rule.id
                self.toks[group] = rule
                alternatives.append("(?P<%s>%s)" % (group, rule.regex))
            self.regexes[state] = re.compile("|".join(alternatives),
                                             re.MULTILINE | re.VERBOSE)
        self.state = first

    def lex(self, text):
        """
        Lexically analyze `text`.

        Yields pairs (`name`, `tokentext`).
        """
        end = len(text)
        state = self.state
        pos = 0
        while pos < end:
            # Scan from the current position; restart the scan whenever a
            # token switches the lexer into a different state.
            for match in self.regexes[state].finditer(text, pos):
                tok = self.toks[match.lastgroup]
                tokentext = match.group(match.lastgroup)
                pos += len(tokentext)
                yield (tok.name, tokentext)
                if tok.next:
                    state = tok.next
                    break
        self.state = state
class JsLexer(Lexer):
    """
    A Javascript lexer

    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]

    This doesn't properly handle non-ASCII characters in the Javascript source.
    """
    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities must appear in the list before shorter ones, for example,
    # '>>' before '>'.
    #
    # Note that we don't have to detect malformed Javascript, only properly
    # lex correct Javascript, so much of this is simplified.

    # Details of Javascript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf

    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons

    # Tokens valid in both the 'div' and 'reg' states, tried first.
    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                           break case catch class const continue debugger
                           default delete do else enum export extends
                           finally for function if import in instanceof
                           new return super switch this throw try typeof
                           var void while with
                           """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        # NOTE(review): the space inside "[a-zA-Z_$ ]" and the "Z" in
        # "[0-9a-fA-Z]" below look like typos for "[a-zA-Z_$]" and
        # "[0-9a-fA-F]" -- confirm against upstream jslex before changing.
        Tok("id", r"""
                  ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    ( (0|[1-9][0-9]*) # DecimalIntegerLiteral
                      \. # dot
                      [0-9]* # DecimalDigits-opt
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    |
                      \. # dot
                      [0-9]+ # DecimalDigits
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    |
                      (0|[1-9][0-9]*) # DecimalIntegerLiteral
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
            >>>= === !== >>> <<= >>= <= >= == != << >> &&
            || += -= *= %= &= |= ^=
            """), next="reg"),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]

    both_after = [
        Tok("other", r"."),
    ]

    # Two states distinguish what a leading '/' means (division vs. regex
    # literal), mirroring automatic-semicolon-insertion context tracking.
    states = {
        # slash will mean division
        'div': both_before + [
            Tok("punct", literals("/= /"), next='reg'),
        ] + both_after,

        # slash will mean regex
        'reg': both_before + [
            Tok("regex",
                r"""
                    / # opening slash
                    # First character is..
                    ( [^*\\/[] # anything but * \ / or [
                    | \\. # or an escape sequence
                    | \[ # or a class, which has
                        ( [^\]\\] # anything but \ or ]
                        | \\. # or an escape sequence
                        )* # many times
                      \]
                    )
                    # Following characters are same, except for excluding a star
                    ( [^\\/[] # anything but \ / or [
                    | \\. # or an escape sequence
                    | \[ # or a class, which has
                        ( [^\]\\] # anything but \ or ]
                        | \\. # or an escape sequence
                        )* # many times
                      \]
                    )* # many times
                    / # closing slash
                    [a-zA-Z0-9]* # trailing flags
                """, next='div'),
        ] + both_after,
    }

    def __init__(self):
        super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
    """
    Convert the Javascript source `js` into something resembling C for
    xgettext.

    What actually happens is that all the regex literals are replaced with
    "REGEX".
    """
    def escape_quotes(m):
        """Used in a regex to properly escape double quotes."""
        return r'\"' if m.group(0) == '"' else m.group(0)

    pieces = []
    for kind, text in JsLexer().lex(js):
        if kind == 'regex':
            # C doesn't grok regexes, and they aren't needed for gettext,
            # so just output a string instead.
            text = '"REGEX"'
        elif kind == 'string' and text.startswith("'"):
            # C doesn't have single-quoted strings, so make all strings
            # double-quoted.
            text = '"' + re.sub(r"\\.|.", escape_quotes, text[1:-1]) + '"'
        elif kind == 'id':
            # C can't deal with Unicode escapes in identifiers. We don't
            # need them for gettext anyway, so replace them with something
            # innocuous.
            text = text.replace("\\", "U")
        pieces.append(text)
    return ''.join(pieces)
| bsd-3-clause |
WaveBlocks/WaveBlocksND | WaveBlocksND/HagedornBasisEvaluationPsi.py | 1 | 6904 | """The WaveBlocks Project
The basic common algorithms for evaluation Hagedorn basis functions
of the new kind.
@author: R. Bourquin
@copyright: Copyright (C) 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import complexfloating, dot, vstack, zeros, identity, diag, real
from numpy.linalg import eigh
from scipy import sqrt
from scipy.linalg import det, polar
from WaveBlocksND.HagedornBasisEvaluationCommon import HagedornBasisEvaluationCommon
__all__ = ["HagedornBasisEvaluationPsi"]
class HagedornBasisEvaluationPsi(HagedornBasisEvaluationCommon):
    r"""Evaluation of Hagedorn basis functions :math:`\psi` of the new kind,
    built on the common recursion machinery of the base class.
    """

    def evaluate_basis_at(self, grid, component, *, prefactor=False):
        r"""Evaluate the basis functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.

        :param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
        :type grid: A class having a :py:meth:`get_nodes(...)` method.
        :param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
        :param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
        :type prefactor: Boolean, default is ``False``.
        :return: A two-dimensional ndarray :math:`H` of shape :math:`(|\mathfrak{K}_i|, |\Gamma|)` where
                 the entry :math:`H[\mu(k), i]` is the value of :math:`\phi_k(\gamma_i)`.
        """
        D = self._dimension

        bas = self._basis_shapes[component]
        bs = self._basis_sizes[component]

        # The grid
        grid = self._grid_wrap(grid)
        nodes = grid.get_nodes()
        nn = grid.get_number_nodes(overall=True)

        # Allocate the storage array: one row per basis index, one column per node.
        phi = zeros((bs, nn), dtype=complexfloating)

        # Precompute some constants
        Pi = self.get_parameters(component=component)
        q, p, Q, P, _ = Pi

        # Transformation to {w} basis.
        # NOTE(review): PA is the positive factor of the left polar
        # decomposition Q = PA * U; Qinv = diag(1/EW) @ EV.T rescales
        # coordinates in PA's eigenbasis -- confirm against the derivation
        # of the psi recursion before modifying.
        _, PA = polar(Q, side='left')
        EW, EV = eigh(real(PA))
        Qinv = dot(diag(1.0 / EW), EV.T)
        QQ = identity(D)

        # Compute the ground state phi_0 via direct evaluation
        mu0 = bas[tuple(D * [0])]
        phi[mu0, :] = self._evaluate_phi0(component, nodes, prefactor=False)

        # Compute all higher order states phi_k via recursion
        for d in range(D):
            # Iterator for all valid index vectors k
            indices = bas.get_node_iterator(mode="chain", direction=d)

            for k in indices:
                # Current index vector
                ki = vstack(k)

                # Access predecessors
                phim = zeros((D, nn), dtype=complexfloating)
                for j, kpj in bas.get_neighbours(k, selection="backward"):
                    mukpj = bas[kpj]
                    phim[j, :] = phi[mukpj, :]

                # Compute 3-term recursion
                p1 = (nodes - q) * phi[bas[k], :]
                p2 = sqrt(ki) * phim

                t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
                t2 = dot(QQ[d, :], p2)

                # Find multi-index where to store the result
                kped = bas.get_neighbours(k, selection="forward", direction=d)

                # Did we find this k?
                if len(kped) > 0:
                    kped = kped[0]

                    # Store computed value
                    phi[bas[kped[1]], :] = (t1 - t2) / sqrt(ki[d] + 1.0)

        if prefactor is True:
            phi = phi / self._get_sqrt(component)(det(Q))

        return phi

    def slim_recursion(self, grid, component, *, prefactor=False):
        r"""Evaluate the Hagedorn wavepacket :math:`\Psi` at the given nodes :math:`\gamma`.

        This routine is a slim version compared to the full basis evaluation. At every moment
        we store only the data we really need to compute the next step until we hit the highest
        order basis functions.

        :param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
        :type grid: A class having a :py:meth:`get_nodes(...)` method.
        :param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
        :param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
        :type prefactor: Boolean, default is ``False``.
        :return: A list of arrays or a single array containing the values of the :math:`\Phi_i`
                 at the nodes :math:`\gamma`.

        Note that this function does not include the global phase :math:`\exp(\frac{i S}{\varepsilon^2})`.
        """
        D = self._dimension

        # Precompute some constants
        Pi = self.get_parameters(component=component)
        q, p, Q, P, _ = Pi

        # Transformation to {w} basis (same construction as evaluate_basis_at).
        _, PA = polar(Q, side='left')
        EW, EV = eigh(real(PA))
        Qinv = dot(diag(1.0 / EW), EV.T)
        QQ = identity(D)

        # The basis shape
        bas = self._basis_shapes[component]
        Z = tuple(D * [0])

        # Book keeping: breadth-first sweep over the basis shape; 'tmp'
        # caches only the basis values still needed by the next wavefront.
        todo = []
        newtodo = [Z]
        olddelete = []
        delete = []
        tmp = {}

        # The grid nodes
        grid = self._grid_wrap(grid)
        nn = grid.get_number_nodes(overall=True)
        nodes = grid.get_nodes()

        # Evaluate phi0
        tmp[Z] = self._evaluate_phi0(component, nodes, prefactor=False)
        psi = self._coefficients[component][bas[Z], 0] * tmp[Z]

        # Iterate for higher order states
        while len(newtodo) != 0:
            # Delete results that never will be used again
            for d in olddelete:
                del tmp[d]
            # Exchange queues
            todo = newtodo
            newtodo = []
            olddelete = delete
            delete = []
            # Compute new results
            for k in todo:
                # Center stencil at node k
                ki = vstack(k)
                # Access predecessors
                phim = zeros((D, nn), dtype=complexfloating)
                for j, kpj in bas.get_neighbours(k, selection="backward"):
                    phim[j, :] = tmp[kpj]
                # Compute the neighbours
                for d, n in bas.get_neighbours(k, selection="forward"):
                    if n not in tmp.keys():
                        # Compute 3-term recursion
                        p1 = (nodes - q) * tmp[k]
                        p2 = sqrt(ki) * phim
                        t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
                        t2 = dot(QQ[d, :], p2)
                        # Store computed value
                        tmp[n] = (t1 - t2) / sqrt(ki[d] + 1.0)
                        # And update the result
                        psi = psi + self._coefficients[component][bas[n], 0] * tmp[n]
                        newtodo.append(n)
                delete.append(k)

        if prefactor is True:
            psi = psi / self._get_sqrt(component)(det(Q))

        return psi
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/httplib2/python3/httplib2/__init__.py | 29 | 56301 |
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 3.0 or later
Changelog:
2009-05-28, Pilgrim: ported to Python 3
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger",
"Mark Pilgrim"]
__license__ = "MIT"
__version__ = "0.7.7"
import re
import sys
import email
import email.utils
import email.message
import email.feedparser
import io
import gzip
import zlib
import http.client
import urllib.parse
import base64
import os
import copy
import calendar
import time
import random
import errno
from hashlib import sha1 as _sha, md5 as _md5
import hmac
from gettext import gettext as _
import socket
import ssl
_ssl_wrap_socket = ssl.wrap_socket
try:
import socks
except ImportError:
socks = None
from .iri2uri import iri2uri
def has_timeout(timeout):
    """Return True when *timeout* is an actual timeout value rather than one
    of the "no timeout supplied" sentinels (None, or socket's global default
    marker when that attribute exists)."""
    sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', None)
    return (timeout is not None and timeout is not sentinel)
# Public names exported by ``from httplib2 import *``.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
           'RedirectMissingLocation', 'RedirectLimit',
           'FailedToDecompressContent', 'UnimplementedDigestAuthOptionError',
           'UnimplementedHmacDigestAuthOptionError',
           'debuglevel', 'RETRIES']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    # desc is a human-readable description; response/content are the
    # HTTP response and body this error relates to.
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

# Errors that carry the offending response and content along.
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

# Errors raised before any (usable) response exists.
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class CertificateValidationUnsupportedInPython31(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support

# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)

# Pluggable cache storage (supports storing the cache in
#   flat files by default. We need a plug-in architecture
#   that can support Berkeley DB and Squid)

# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.

# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

# Which headers are hop-by-hop headers by default (RFC 2616 section 13.5.1).
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']

# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
def _get_end2end_headers(response):
    """Return the names of the end-to-end (non hop-by-hop) headers in *response*,
    honoring any extra hop-by-hop names listed in its Connection header."""
    hop_by_hop = set(HOP_BY_HOP)
    hop_by_hop.update(name.strip() for name in response.get('connection', '').split(','))
    return [name for name in response.keys() if name not in hop_by_hop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])

def urlnorm(uri):
    """Normalize *uri* into (scheme, authority, request_uri, defrag_uri).

    Scheme and authority are lower-cased, an empty path becomes "/", and
    defrag_uri is the reassembled URI without any fragment.

    :raises RelativeURIError: if *uri* lacks a scheme or an authority.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and authority are case-insensitive; path and query are kept
    # verbatim. (The original lower-cased the scheme a second time further
    # down -- once is enough.)
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(br'^\w+://')
re_url_scheme_s = re.compile(r'^\w+://')
re_slash = re.compile(br'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """

    try:
        # For URL-shaped names, try to reduce non-ASCII characters to ASCII
        # via the IDNA codec; names that cannot be encoded pass through
        # unchanged.
        if re_url_scheme_s.match(filename):
            if isinstance(filename,bytes):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,str):
        filename=filename.encode('utf-8')
    # Hash the full (pre-trim) name so the 200-char truncation below cannot
    # cause two long URLs to collide on the same cache file.
    filemd5 = _md5(filename).hexdigest().encode('utf-8')
    # Drop the scheme and flatten path/query separators into commas.
    filename = re_url_scheme.sub(b"", filename)
    filename = re_slash.sub(b",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return b",".join((filename, filemd5)).decode('utf-8')
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.items()])
def _parse_cache_control(headers):
retval = {}
if 'cache-control' in headers:
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headername in headers:
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """

    # Returns one of "STALE", "TRANSPARENT" or "FRESH".
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
        # Legacy HTTP/1.0 no-cache request; also record it as Cache-Control
        # so downstream code sees a consistent request.
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif 'no-cache' in cc:
        retval = "TRANSPARENT"
    elif 'no-cache' in cc_response:
        retval = "STALE"
    elif 'only-if-cached' in cc:
        retval = "FRESH"
    elif 'date' in response_headers:
        # Age of the cached response, measured from its Date header.
        date = calendar.timegm(email.utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if 'max-age' in cc_response:
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif 'expires' in response_headers:
            expires = email.utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides whatever the response allowed.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh effectively ages the entry by the requested margin.
        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=io.BytesIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way the won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _bind_write_headers(msg):
    """Return a replacement for the email Generator's _write_headers that
    emits the headers of *msg*, encoding non-Header values via
    email.header.Header (UTF-8) so non-ASCII values serialize cleanly.
    """
    from email.header import Header

    def _write_headers(self):
        # Self refers to the Generator object
        for h, v in msg.items():
            print('%s:' % h, end=' ', file=self._fp)
            if isinstance(v, Header):
                print(v.encode(maxlinelen=self._maxheaderlen), file=self._fp)
            else:
                # Header's got lots of smarts, so use it.
                header = Header(v, maxlinelen=self._maxheaderlen, charset='utf-8',
                                header_name=h)
                print(header.encode(), file=self._fp)
        # A blank line always separates headers from body
        print(file=self._fp)
    return _write_headers
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store the response (headers + body) in *cache* under *cachekey*,
    or purge the entry when either side sent Cache-Control: no-store.

    The cached record is "status: NNN\\r\\n" followed by the RFC 822 style
    header block and the raw body bytes.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if 'no-store' in cc or 'no-store' in cc_response:
            cache.delete(cachekey)
        else:
            # Rebuild the headers as an email Message, dropping fields that
            # would not apply when the entry is replayed from cache.
            info = email.message.Message()
            for key, value in response_headers.items():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            # A 304 validates the cached entry, so store it as a plain 200
            # that can be replayed directly.
            status = response_headers.status
            if status == 304:
                status = 200

            status_header = 'status: %d\r\n' % status

            try:
                header_str = info.as_string()
            except UnicodeEncodeError:
                # Fall back to a UTF-8-aware header writer for non-ASCII values.
                setattr(info, '_write_headers', _bind_write_headers(info))
                header_str = info.as_string()

            # Normalize all line endings to CRLF before serializing.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = b"".join([status_header.encode('utf-8'), header_str.encode('utf-8'), content])

            cache.set(cachekey, text)
def _cnonce():
dig = _md5(("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).encode('utf-8')).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha(("%s%s%s" % (cnonce, iso_now, password)).encode('utf-8')).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for the per-scheme HTTP authentication handlers.

    An instance is scoped to one (host, path) pair and a set of credentials;
    subclasses override request()/response() to implement a concrete scheme.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Number of path segments in *request_uri* below this auth's root path."""
        # The original also called parse_uri() here and discarded the result;
        # the computation only ever uses request_uri itself, so the dead call
        # has been removed.
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """True if *request_uri* on *host* falls under this auth's scope."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Override this in sub-classes if necessary.

        Return TRUE if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False

    # NOTE(review): these comparisons make every instance unequal to and
    # "less than" every other -- presumably to give a stable ordering when
    # instances are kept in a sorted collection; confirm at the call sites.
    def __eq__(self, auth):
        return False

    def __ne__(self, auth):
        return True

    def __lt__(self, auth):
        return True

    def __gt__(self, auth):
        return False

    def __le__(self, auth):
        return True

    def __ge__(self, auth):
        return False

    def __bool__(self):
        return True
class BasicAuthentication(Authentication):
    """Handler for RFC 2617 Basic authentication."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add a Basic Authorization header built from the stored credentials."""
        userpass = ("%s:%s" % self.credentials).encode('utf-8')
        token = base64.b64encode(userpass).strip().decode('utf-8')
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        # Normalize qop: only 'auth' is supported; anything else raises.
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1: the credentials pair joined with the realm (RFC 2617 3.2.2.2).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # Nonce count, incremented on every request that reuses the nonce.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD are the hashing helpers from RFC 2617 section 3.2.1.
        H = lambda x: _md5(x.encode('utf-8')).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
                self.challenge['nonce'],
                '%08x' % self.challenge['nc'],
                self.challenge['cnonce'],
                self.challenge['qop'], H(A2)))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'])
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        self.challenge['nc'] += 1

    def response(self, response, content):
        if 'authentication-info' not in response:
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                # Server rejected our nonce as stale: adopt the fresh one
                # and signal the caller to retry the request.
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})

            if 'nextnonce' in updated_challenge:
                # Server rotated the nonce proactively; reset the count.
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above.

    Implements the (experimental) HMACDigest scheme.  Fixed for Python 3:
    the hash functions bound to ``_md5``/``_sha`` are plain hashlib
    constructors (see their use elsewhere in this module, e.g.
    ``_md5(x.encode('utf-8'))``), so the old py2-era ``.new()`` API is gone,
    and ``hmac.new`` requires bytes for both key and message.
    """
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derive the HMAC key: hash(password + salt), embed in
        # "username:hash:realm", then hash the whole string again.
        # The constructors are called directly (not via the removed ``.new``
        # attribute) and fed utf-8 bytes, matching hashlib's API.
        self.key = "".join([self.credentials[0], ":",
                            self.pwhashmod("".join([self.credentials[1],
                                                    self.challenge['salt']]).encode('utf-8')).hexdigest().lower(),
                            ":", self.challenge['realm']])
        self.key = self.pwhashmod(self.key.encode('utf-8')).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        # hmac.new requires bytes for both the key and the message.
        request_digest = hmac.new(self.key.encode('utf-8'),
                                  request_digest.encode('utf-8'),
                                  self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
            self.credentials[0],
            self.challenge['realm'],
            self.challenge['snonce'],
            cnonce,
            request_uri,
            created,
            request_digest,
            keylist)

    def response(self, response, content):
        # Retry once when the server reports an integrity failure or a
        # stale nonce; otherwise give up.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication (thinly tested).

    There is currently no third-party server available to test against.
    Blogger and TypePad implemented this algorithm at one point, but Blogger
    has since switched to Basic over HTTPS and TypePad implemented it wrong,
    never issuing a 401 challenge and simply expecting the client to know
    that the endpoint wants WSSE profile="UsernameToken".
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the Authorization and X-WSSE headers to the outgoing request."""
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        digest = _wsse_username_token(nonce, created, self.credentials[1])
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        headers['X-WSSE'] = ('UsernameToken Username="%s", PasswordDigest="%s", '
                             'Nonce="%s", Created="%s"') % (
            self.credentials[0],
            digest,
            nonce,
            created)
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin authentication.

    On construction, exchanges the user's credentials for an auth token by
    POSTing to the ClientLogin endpoint, then sends that token in a
    'GoogleLogin Auth=...' Authorization header on every request.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib.parse import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge.
        # For the rest we guess based on the URI.
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"
        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # Http.request returns the body as bytes (see _conn_request), so it
        # must be decoded before it can be split into "key=value" lines.
        lines = content.decode('utf-8').split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: keep an empty token rather than raising here.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Map from auth scheme name (lowercased, as parsed from a WWW-Authenticate
# challenge) to the Authentication subclass implementing it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Preference order used when a server offers several schemes at once.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """

    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """`cache` is the directory path; `safe` maps a cache key to a
        filesystem-safe file name."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for `key`, or None if absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # `with` guarantees the handle is closed even if read() fails.
            with open(cacheFullPath, "rb") as f:
                retval = f.read()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store `value` (bytes) under `key`, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        # `with` guarantees the handle is closed even if write() fails.
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for `key`, if present."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A simple in-memory store of (domain, name, password) triples.

    An empty domain acts as a wildcard matching every host.
    """

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair, optionally scoped to `domain`."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to `domain`."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert.

    Stored triples are therefore (domain, key, cert)."""
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:
        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings as a 6-tuple in constructor-argument order."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """Truthy when the socks module is available and host/port are set."""
        return socks and (self.proxy_host != None) and (self.proxy_port != None)
def proxy_info_from_environment(method='http'):
    """Build a ProxyInfo from the ``<method>_proxy`` environment variable.

    Returns None when `method` is not 'http'/'https' or no proxy URL is set.
    """
    if method not in ('http', 'https'):
        return None
    name = method + '_proxy'
    # Prefer the lowercase variable; fall back to the uppercase variant
    # only when the lowercase one is entirely absent.
    url = os.environ.get(name, os.environ.get(name.upper()))
    if not url:
        return None
    return proxy_info_from_url(url, method)
def proxy_info_from_url(url, method='http'):
    """
    Construct a ProxyInfo from a URL (such as http_proxy env var)
    """
    parsed = urllib.parse.urlparse(url)
    username = None
    password = None
    port = None
    netloc = parsed[1]
    # Split optional "user:pass@" credentials off the host part.
    if '@' in netloc:
        ident, host_port = netloc.split('@', 1)
        if ':' in ident:
            username, password = ident.split(':', 1)
        else:
            # A lone token before '@' is treated as a password.
            password = ident
    else:
        host_port = netloc
    if ':' in host_port:
        host, port = host_port.split(':', 1)
    else:
        host = host_port
    # Default to the scheme's well-known port when none is given.
    port = int(port) if port else dict(https=443, http=80)[method]
    proxy_type = 3  # socks.PROXY_TYPE_HTTP
    return ProxyInfo(
        proxy_type=proxy_type,
        proxy_host=host,
        proxy_port=port,
        proxy_user=username or None,
        proxy_pass=password or None,
    )
class HTTPConnectionWithTimeout(http.client.HTTPConnection):
    """HTTPConnection subclass that supports timeouts.

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, timeout=None, proxy_info=None):
        # The stdlib connection already handles host/port/timeout; we only
        # need to remember the proxy settings for later use.
        super().__init__(host, port=port, timeout=timeout)
        self.proxy_info = proxy_info
class HTTPSConnectionWithTimeout(http.client.HTTPSConnection):
    """
    This class allows communication via SSL.
    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        self.proxy_info = proxy_info
        context = None
        # Fall back to the module-level bundled CA file when none is given.
        if ca_certs is None:
            ca_certs = CA_CERTS
        # Build an SSL context only when certificate validation is wanted;
        # otherwise the stdlib default (unvalidated, pre-context) path is used.
        if (cert_file or ca_certs) and not disable_ssl_certificate_validation:
            if not hasattr(ssl, 'SSLContext'):
                # Python 3.1 has no SSLContext, so validation is impossible.
                raise CertificateValidationUnsupportedInPython31()
            # NOTE(review): PROTOCOL_TLSv1 pins the connection to TLS 1.0;
            # modern servers may refuse it — consider PROTOCOL_TLS_CLIENT
            # if upgrading this module.
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.verify_mode = ssl.CERT_REQUIRED
            if cert_file:
                context.load_cert_chain(cert_file, key_file)
            if ca_certs:
                context.load_verify_locations(ca_certs)
        http.client.HTTPSConnection.__init__(
                self, host, port=port, key_file=key_file,
                cert_file=cert_file, timeout=timeout, context=context,
                check_hostname=True)
# Default connection class per URI scheme; Http.request consults this
# when no explicit connection_type argument is supplied.
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout,
}
class Http(object):
    """An HTTP client that handles:
    - all methods
    - caching
    - ETags
    - compression,
    - HTTPS
    - Basic
    - Digest
    - WSSE
    and more.
    """

    def __init__(self, cache=None, timeout=None,
                 proxy_info=proxy_info_from_environment,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.
        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout
        `proxy_info` may be:
          - a callable that takes the http scheme ('http' or 'https') and
            returns a ProxyInfo instance per request. By default, uses
            proxy_info_from_environment.
          - a ProxyInfo instance (static proxy config).
          - None (proxy disabled).
        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.
        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, str):
            self.cache = FileCache(cache)
        else:
            self.cache = cache
        # Name/password
        self.credentials = Credentials()
        # Key/cert
        self.certificates = KeyCerts()
        # authorization objects
        self.authorizations = []
        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True
        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirecs are followed, including unsafe ones.
        self.follow_all_redirects = False
        self.ignore_etag = False
        self.force_exception_to_status_code = False
        self.timeout = timeout
        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False

    def __getstate__(self):
        # Pickling support: live connections (and any injected 'request'
        # attribute) are not picklable, so they are dropped here.
        state_dict = copy.copy(self.__dict__)
        # In case request is augmented by some foreign object such as
        # credentials which handle auth
        if 'request' in state_dict:
            del state_dict['request']
        if 'connections' in state_dict:
            del state_dict['connections']
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Connections are re-established lazily after unpickling.
        self.connections = {}

    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
        that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if scheme in challenges:
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)

    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        self.credentials.add(name, password, domain)

    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain)

    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        self.authorizations = []

    def _conn_request(self, conn, request_uri, method, body, headers):
        # Send one request over 'conn', retrying up to RETRIES times when
        # the connection appears to have been dropped.  Returns
        # (Response, bytes); for HEAD the body is always empty.
        for i in range(RETRIES):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                conn.close()
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except socket.error as e:
                # The wrapped error may itself carry the real errno.
                errno_ = (e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno)
                if errno_ == errno.ECONNREFUSED: # Connection refused
                    raise
            except http.client.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i < RETRIES-1:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i < RETRIES-1:
                    conn.close()
                    conn.connect()
                    continue
                pass
            try:
                response = conn.getresponse()
            except socket.timeout:
                raise
            except (socket.error, http.client.HTTPException):
                # Reconnect once on a failed read; give up afterwards.
                conn.close()
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = b""
                if method == "HEAD":
                    conn.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)

    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""
        # Pick the most specific in-scope authorization (smallest depth).
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)
        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
        if auth:
            # Re-send once if the auth object says the challenge went stale.
            if auth.response(response, body):
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1
        if response.status == 401:
            # Try each credential/scheme combination until one is accepted.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break
        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if 'location' not in response and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if 'location' in response:
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urllib.parse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Permanent redirects for safe methods are cacheable.
                        response['-x-permanent-redirect-url'] = response['location']
                        if 'content-location' not in response:
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    # Validators and credentials must not leak to the new URL.
                    if 'if-none-match' in headers:
                        del headers['if-none-match']
                    if 'if-modified-since' in headers:
                        del headers['if-modified-since']
                    if 'authorization' in headers and not self.forward_authorization_headers:
                        del headers['authorization']
                    if 'location' in response:
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if 'content-location' not in old_response:
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # Per common practice, 302/303 are re-fetched as GET.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
                # Don't cache 206's since we aren't going to handle byte range requests
                if 'content-location' not in response:
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)
        return (response, content)

    def _normalize_headers(self, headers):
        return _normalize_headers(headers)

    # Need to catch and rebrand some exceptions
    # Then need to optionally turn all exceptions into status codes
    # including all socket.* and httplib.* exceptions.

    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.
        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.
        The 'body' is the entity body to be sent with the request. It is a string
        object.
        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.
        The maximum number of redirect to follow before raising an
        exception is 'redirections. The default is 5.
        The return value is a tuple of (response, content), the first
        being and instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)
            if 'user-agent' not in headers:
                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
            uri = iri2uri(uri)
            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # "host:443" over plain http really means https.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]
            # Reuse one connection per scheme+authority.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if issubclass(connection_type, HTTPSConnectionWithTimeout):
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                                authority, key_file=certs[0][0],
                                cert_file=certs[0][1], timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                    else:
                        conn = self.connections[conn_key] = connection_type(
                                authority, timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=self.proxy_info)
                conn.set_debuglevel(debuglevel)
            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'
            info = email.message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    try:
                        # Cache entries are "<header block>\r\n\r\n<body>".
                        info, content = cached_value.split(b'\r\n\r\n', 1)
                        info = email.message_from_bytes(info)
                        for k, v in info.items():
                            # Undo RFC 2047 encoding applied when caching.
                            if v.startswith('=?') and v.endswith('?='):
                                info.replace_header(k,
                                                    str(*email.header.decode_header(v)[0]))
                    except (IndexError, ValueError):
                        # Corrupt cache entry: discard and fetch fresh.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None
            if method in self.optimistic_concurrency_methods and self.cache and 'etag' in info and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']
            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)
            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break
            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if '-x-permanent-redirect-url' in info:
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)
                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = b""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)
                    if entry_disposition == "STALE":
                        # Revalidate with whatever validators the entry has.
                        if 'etag' in info and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if 'last-modified' in info and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass
                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    # Revalidation failed outright: drop the stale entry.
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if 'only-if-cached' in cc:
                    # RFC 2616 14.9.4: nothing cached, so answer 504.
                    info['status'] = '504'
                    response = Response(info)
                    content = b""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception as e:
            # Optionally convert any failure into a synthetic Response
            # instead of letting the exception propagate.
            if self.force_exception_to_status_code:
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = b"Request Timeout"
                    response = Response({
                        "content-type": "text/plain",
                        "status": "408",
                        "content-length": len(content)
                    })
                    response.reason = "Request Timeout"
                else:
                    content = str(e).encode('utf-8')
                    response = Response({
                        "content-type": "text/plain",
                        "status": "400",
                        "content-length": len(content)
                    })
                    response.reason = "Bad Request"
            else:
                raise
        return (response, content)
class Response(dict):
    """An object more like email.message than httplib.HTTPResponse."""

    # Was this response served from the local cache?
    fromcache = False
    # HTTP protocol version used by server: 10 for HTTP/1.0, 11 for HTTP/1.1.
    version = 11
    # Status code returned by server.
    status = 200
    # Reason phrase returned by server.
    reason = "Ok"
    # Previous Response in a redirect chain, if any.
    previous = None

    def __init__(self, info):
        """Populate the header dict from an http.client.HTTPResponse,
        an email.message.Message, or any plain mapping."""
        if isinstance(info, http.client.HTTPResponse):
            for key, value in info.getheaders():
                key = key.lower()
                # Repeated headers are folded into one comma-separated value.
                if key in self:
                    value = ', '.join((self[key], value))
                self[key] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.message.Message):
            for key, value in list(info.items()):
                self[key.lower()] = value
            self.status = int(self['status'])
        else:
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Backwards-compatibility shim: resp.dict used to expose the headers.
        if name == 'dict':
            return self
        raise AttributeError(name)
| bsd-3-clause |
flopp/GpxTrackPoster | gpxtrackposter/calendar_drawer.py | 1 | 5500 | """Draw a calendar poster."""
# Copyright 2016-2021 Florian Pigorsch & Contributors. All rights reserved.
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import calendar
import datetime
import svgwrite # type: ignore
from gpxtrackposter import utils
from gpxtrackposter.exceptions import PosterError
from gpxtrackposter.localization import localized_day_of_week_name
from gpxtrackposter.poster import Poster
from gpxtrackposter.tracks_drawer import TracksDrawer
from gpxtrackposter.xy import XY
class CalendarDrawer(TracksDrawer):
    """Draw a calendar poster: one mini calendar per year, one cell per day,
    with each day's cell colored by the total tracked distance."""

    def __init__(self, the_poster: Poster):
        super().__init__(the_poster)

    def draw(self, dr: svgwrite.Drawing, g: svgwrite.container.Group, size: XY, offset: XY) -> None:
        """Iterate through the Poster's years, creating a calendar for each."""
        if self.poster.tracks is None:
            raise PosterError("No tracks to draw.")
        years = self.poster.years.count()
        # Lay the per-year calendars out in a grid that fits the area.
        _, counts = utils.compute_grid(years, size)
        if counts is None:
            raise PosterError("Unable to compute grid.")
        count_x, count_y = counts[0], counts[1]
        x, y = 0, 0
        cell_size = size * XY(1 / count_x, 1 / count_y)
        # Margin between year cells; dropped when there is only a single
        # column/row.  NOTE: this mutates the XY instance in place.
        margin = XY(4, 8)
        if count_x <= 1:
            margin.x = 0
        if count_y <= 1:
            margin.y = 0
        sub_size = cell_size - 2 * margin
        for year in self.poster.years.iter():
            g_year = dr.g(id=f"year{year}")
            g.add(g_year)
            self._draw(dr, g_year, sub_size, offset + margin + cell_size * XY(x, y), year)
            # Advance to the next grid slot, wrapping to a new row as needed.
            x += 1
            if x >= count_x:
                x = 0
                y += 1

    def _draw(self, dr: svgwrite.Drawing, g: svgwrite.container.Group, size: XY, offset: XY, year: int) -> None:
        # Draw one year: a year heading followed by 12 month rows of day cells.
        min_size = min(size.x, size.y)
        year_size = min_size * 4.0 / 80.0
        year_style = f"font-size:{year_size}px; font-family:Arial;"
        month_style = f"font-size:{min_size * 3.0 / 80.0}px; font-family:Arial;"
        day_style = f"dominant-baseline: central; font-size:{min_size * 1.0 / 80.0}px; font-family:Arial;"
        day_length_style = f"font-size:{min_size * 1.0 / 80.0}px; font-family:Arial;"
        g.add(
            dr.text(
                f"{year}",
                insert=offset.tuple(),
                fill=self.poster.colors["text"],
                alignment_baseline="hanging",
                style=year_style,
            )
        )
        # Reserve the heading strip.  NOTE: offset and size are XY objects
        # mutated in place here; callers pass fresh instances per year.
        offset.y += year_size
        size.y -= year_size
        # Widest month row: weekday offset of the 1st plus its day count
        # (at least 31 columns so all years align).
        count_x = 31
        for month in range(1, 13):
            date = datetime.date(year, month, 1)
            (_, last_day) = calendar.monthrange(year, month)
            count_x = max(count_x, date.weekday() + last_day)
        # Each month row occupies 3 cell heights (label, cell, distance text).
        cell_size = min(size.x / count_x, size.y / 36)
        # Distribute leftover space evenly between columns and month rows.
        spacing = XY(
            (size.x - cell_size * count_x) / (count_x - 1),
            (size.y - cell_size * 3 * 12) / 11,
        )
        for month in range(1, 13):
            date = datetime.date(year, month, 1)
            y = month - 1
            y_pos = offset.y + (y * 3 + 1) * cell_size + y * spacing.y
            g.add(
                dr.text(
                    self.poster.month_name(month),
                    insert=(offset.x, y_pos - 2),
                    fill=self.poster.colors["text"],
                    alignment_baseline="hanging",
                    style=month_style,
                )
            )
            # Shift day 1 to its weekday column (datetime: Monday == 0).
            day_offset = date.weekday()
            while date.month == month:
                x = date.day - 1
                x_pos = offset.x + (day_offset + x) * cell_size + x * spacing.x
                pos = (x_pos + 0.05 * cell_size, y_pos + 1.15 * cell_size)
                dim = (cell_size * 0.9, cell_size * 0.9)
                text_date = date.strftime("%Y-%m-%d")
                if text_date in self.poster.tracks_by_date:
                    # Active day: color by total distance and print the
                    # distance value below the cell.
                    tracks = self.poster.tracks_by_date[text_date]
                    length = sum([t.length() for t in tracks])
                    has_special = len([t for t in tracks if t.special]) > 0
                    color = self.color(self.poster.length_range_by_date, length, has_special)
                    g.add(dr.rect(pos, dim, fill=color))
                    g.add(
                        dr.text(
                            utils.format_float(self.poster.m2u(length)),
                            insert=(
                                pos[0] + cell_size / 2,
                                pos[1] + cell_size + cell_size / 2,
                            ),
                            text_anchor="middle",
                            style=day_length_style,
                            fill=self.poster.colors["text"],
                        )
                    )
                else:
                    # No activity on this day: neutral gray cell.
                    g.add(dr.rect(pos, dim, fill="#444444"))
                # Day-of-week letter centered in the cell.
                g.add(
                    dr.text(
                        localized_day_of_week_name(date.weekday(), short=True),
                        insert=(
                            offset.x + (day_offset + x) * cell_size + cell_size / 2,
                            pos[1] + cell_size / 2,
                        ),
                        text_anchor="middle",
                        alignment_baseline="middle",
                        style=day_style,
                    )
                )
                date += datetime.timedelta(1)
| mit |
allotria/intellij-community | python/testData/inspections/PyProtocolInspection/incompatibleProtocolSubclass.py | 19 | 1597 | from typing import Protocol
# IntelliJ/PyCharm inspection test fixture: the inline <warning descr="..."> tags
# mark the exact spans where PyProtocolInspection is expected to report; this is
# test DATA, not runnable Python -- do not reformat or the expectations break.
class MyProtocol(Protocol):
    attr: int
    def func(self, p: int) -> str:
        pass
class MyClass1(MyProtocol):
    def __init__(self, attr: int) -> None:
        self.attr = attr
    def <warning descr="Type of 'func' is incompatible with 'MyProtocol'">func</warning>(self, p: str) -> int:
        pass
class MyClass2(MyProtocol):
    def __init__(self, attr: str) -> None:
        self.attr = attr  # mypy says nothing
    def func(self, p: int) -> str:
        pass
class MyClass3(MyProtocol):
    def __init__(self, attr: str) -> None:
        self.attr = attr  # mypy says nothing
    def <warning descr="Type of 'func' is incompatible with 'MyProtocol'">func</warning>(self, p: str) -> int:
        pass
class MyClass4(MyProtocol):
    attr: int
    def <warning descr="Type of 'func' is incompatible with 'MyProtocol'">func</warning>(self, p: str) -> int:
        pass
class MyClass5(MyProtocol):
    <warning descr="Type of 'attr' is incompatible with 'MyProtocol'">attr</warning>: str
    def func(self, p: int) -> str:
        pass
class MyClass6(MyProtocol):
    <warning descr="Type of 'attr' is incompatible with 'MyProtocol'">attr</warning>: str
    def <warning descr="Type of 'func' is incompatible with 'MyProtocol'">func</warning>(self, p: str) -> int:
        pass
class HisProtocol(MyProtocol, Protocol):
    <warning descr="Type of 'attr' is incompatible with 'MyProtocol'">attr</warning>: str
    def <warning descr="Type of 'func' is incompatible with 'MyProtocol'">func</warning>(self, p: str) -> int:
        pass
| apache-2.0 |
shaistaansari/django | tests/utils_tests/test_text.py | 243 | 9471 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import override
# Lazy text factory used to verify that text utilities accept lazy strings
# (see the "Test for #20231" fixture in test_smart_split below).
lazystr = lazy(force_text, six.text_type)
# True when non-BMP characters are single code points (wide build / Python 3).
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
    """Tests for django.utils.text helpers (lists, splitting, truncation,
    wrapping, newline normalization, slugify, filenames, compression).

    Fix: removed stray trailing commas after three ``assertEqual`` calls in
    ``test_truncate_chars`` -- they built and discarded ``(None,)`` tuples.

    NOTE(review): the fixtures in ``test_unescape_entities`` (and the HTML
    entity fixtures in ``test_truncate_html_words``) look as if HTML entities
    were decoded somewhere upstream of this copy (e.g. ``&amp;`` -> ``&``),
    which makes several assertions vacuous -- verify against the original
    Django test vectors before relying on them.
    """

    def test_get_text_list(self):
        self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
        self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
        self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
        self.assertEqual(text.get_text_list(['a']), 'a')
        self.assertEqual(text.get_text_list([]), '')
        with override('ar'):
            self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")

    def test_smart_split(self):
        testdata = [
            ('This is "a person" test.',
             ['This', 'is', '"a person"', 'test.']),
            ('This is "a person\'s" test.',
             ['This', 'is', '"a person\'s"', 'test.']),
            ('This is "a person\\"s" test.',
             ['This', 'is', '"a person\\"s"', 'test.']),
            ('"a \'one',
             ['"a', "'one"]),
            ('all friends\' tests',
             ['all', 'friends\'', 'tests']),
            ('url search_page words="something else"',
             ['url', 'search_page', 'words="something else"']),
            ("url search_page words='something else'",
             ['url', 'search_page', "words='something else'"]),
            ('url search_page words "something else"',
             ['url', 'search_page', 'words', '"something else"']),
            ('url search_page words-"something else"',
             ['url', 'search_page', 'words-"something else"']),
            ('url search_page words=hello',
             ['url', 'search_page', 'words=hello']),
            ('url search_page words="something else',
             ['url', 'search_page', 'words="something', 'else']),
            ("cut:','|cut:' '",
             ["cut:','|cut:' '"]),
            (lazystr("a b c d"),  # Test for #20231
             ['a', 'b', 'c', 'd']),
        ]
        for test, expected in testdata:
            self.assertEqual(list(text.smart_split(test)), expected)

    def test_truncate_chars(self):
        truncator = text.Truncator(
            'The quick brown fox jumped over the lazy dog.'
        )
        # Stray trailing commas removed from the next three assertions.
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.chars(100))
        self.assertEqual('The quick brown fox ...',
                         truncator.chars(23))
        self.assertEqual('The quick brown fo.....',
                         truncator.chars(23, '.....'))
        # Ensure that we normalize our unicode data first
        nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
        nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
        self.assertEqual('oüoüoüoü', nfc.chars(8))
        self.assertEqual('oüoüoüoü', nfd.chars(8))
        self.assertEqual('oü...', nfc.chars(5))
        self.assertEqual('oü...', nfd.chars(5))
        # Ensure the final length is calculated correctly when there are
        # combining characters with no precomposed form, and that combining
        # characters are not split up.
        truncator = text.Truncator('-B\u030AB\u030A----8')
        self.assertEqual('-B\u030A...', truncator.chars(5))
        self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
        self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
        # Ensure the length of the end text is correctly calculated when it
        # contains combining characters with no precomposed form.
        truncator = text.Truncator('-----')
        self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
        self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
        # Make a best effort to shorten to the desired length, but requesting
        # a length shorter than the ellipsis shouldn't break
        self.assertEqual('...', text.Truncator('asdf').chars(1))

    def test_truncate_words(self):
        truncator = text.Truncator('The quick brown fox jumped over the lazy '
                                   'dog.')
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.words(10))
        self.assertEqual('The quick brown fox...', truncator.words(4))
        self.assertEqual('The quick brown fox[snip]',
                         truncator.words(4, '[snip]'))

    def test_truncate_html_words(self):
        truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
                                   ' jumped over the lazy dog.</em></strong></p>')
        self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
                         ' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
                         '</strong></p>', truncator.words(4, html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
                         '</strong></p>', truncator.words(4, '....', html=True))
        self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
                         '</strong></p>', truncator.words(4, '', html=True))
        # Test with new line inside tag
        truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
                                   'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
        self.assertEqual('<p>The quick <a href="xyz.html"\n'
                         'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
        # Test self-closing tags
        truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
                                   ' the lazy dog.')
        self.assertEqual('<br/>The <hr />quick brown...',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
                                   'jumped over the lazy dog.')
        self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
                         truncator.words(3, '...', html=True))
        # Test html entities
        # NOTE(review): fixtures below appear entity-decoded; see class note.
        truncator = text.Truncator('<i>Buenos días!'
                                   ' ¿Cómo está?</i>')
        self.assertEqual('<i>Buenos días! ¿Cómo...</i>',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<p>I <3 python, what about you?</p>')
        self.assertEqual('<p>I <3 python...</p>',
                         truncator.words(3, '...', html=True))

    def test_wrap(self):
        digits = '1234 67 9'
        self.assertEqual(text.wrap(digits, 100), '1234 67 9')
        self.assertEqual(text.wrap(digits, 9), '1234 67 9')
        self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
        self.assertEqual(text.wrap('short\na long line', 7),
                         'short\na long\nline')
        self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
                         'do-not-break-long-words\nplease?\nok')
        long_word = 'l%sng' % ('o' * 20)
        self.assertEqual(text.wrap(long_word, 20), long_word)
        self.assertEqual(text.wrap('a %s word' % long_word, 10),
                         'a\n%s\nword' % long_word)

    def test_normalize_newlines(self):
        self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
                         "abc\ndef\nghi\n")
        self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
        self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
        self.assertEqual(text.normalize_newlines(""), "")

    def test_normalize_newlines_bytes(self):
        """normalize_newlines should be able to handle bytes too"""
        normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
        self.assertEqual(normalized, "abc\ndef\nghi\n")
        self.assertIsInstance(normalized, six.text_type)

    def test_slugify(self):
        items = (
            # given - expected - unicode?
            ('Hello, World!', 'hello-world', False),
            ('spam & eggs', 'spam-eggs', False),
            ('spam & ıçüş', 'spam-ıçüş', True),
            ('foo ıç bar', 'foo-ıç-bar', True),
            ('    foo ıç bar', 'foo-ıç-bar', True),
            ('你好', '你好', True),
        )
        for value, output, is_unicode in items:
            self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)

    def test_unescape_entities(self):
        # NOTE(review): the escaped inputs look decoded in transit, which
        # makes most of these pairs identity checks; see class note.
        items = [
            ('', ''),
            ('foo', 'foo'),
            ('&', '&'),
            ('&', '&'),
            ('&', '&'),
            ('foo & bar', 'foo & bar'),
            ('foo & bar', 'foo & bar'),
        ]
        for value, output in items:
            self.assertEqual(text.unescape_entities(value), output)

    def test_get_valid_filename(self):
        filename = "^&'@{}[],$=!-#()%+~_123.txt"
        self.assertEqual(text.get_valid_filename(filename), "-_123.txt")

    def test_compress_sequence(self):
        data = [{'key': i} for i in range(10)]
        seq = list(json.JSONEncoder().iterencode(data))
        seq = [s.encode('utf-8') for s in seq]
        actual_length = len(b''.join(seq))
        out = text.compress_sequence(seq)
        compressed_length = len(b''.join(out))
        self.assertTrue(compressed_length < actual_length)
| bsd-3-clause |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/freestyle/styles/blueprint_ellipses.py | 6 | 1864 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : blueprint_ellipses.py
# Author : Emmanuel Turquin
# Date : 04/08/2005
# Purpose : Produces a blueprint using elliptic contour strokes
from freestyle.chainingiterators import ChainPredicateIterator
from freestyle.predicates import (
AndUP1D,
ContourUP1D,
NotUP1D,
QuantitativeInvisibilityUP1D,
SameShapeIdBP1D,
TrueUP1D,
pyHigherLengthUP1D,
)
from freestyle.shaders import (
ConstantThicknessShader,
IncreasingColorShader,
pyBluePrintEllipsesShader,
pyPerlinNoise1DShader,
)
from freestyle.types import Operators
# Freestyle style pipeline: pick visible contour strokes per object, keep the
# long ones, and render them as noisy blueprint-style ellipses.
selection_pred = AndUP1D(QuantitativeInvisibilityUP1D(0), ContourUP1D())
same_object_pred = SameShapeIdBP1D()
Operators.select(selection_pred)
Operators.bidirectional_chain(
    ChainPredicateIterator(selection_pred, same_object_pred),
    NotUP1D(selection_pred))
# Discard chains shorter than 200 units before shading.
Operators.select(pyHigherLengthUP1D(200))
shader_pipeline = [
    ConstantThicknessShader(5),
    pyBluePrintEllipsesShader(3),
    pyPerlinNoise1DShader(0.1, 10, 8),
    IncreasingColorShader(0.6, 0.3, 0.3, 0.7, 0.3, 0.3, 0.3, 0.1),
]
Operators.create(TrueUP1D(), shader_pipeline)
| gpl-3.0 |
tmxdyf/CouchPotatoServer | libs/apscheduler/triggers/cron/fields.py | 115 | 3058 | """
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from apscheduler.triggers.cron.expressions import *
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField',
'WeekField', 'DayOfMonthField', 'DayOfWeekField')
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1,
'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day:': 31, 'week': 53,
'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*',
'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0}
class BaseField(object):
    """One field of a cron specification (e.g. hour, minute).

    A field holds one or more compiled expressions; the next matching value
    for a datetime is the smallest candidate any expression yields.
    """
    # REAL means the field maps directly onto a datetime attribute.
    REAL = True
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        """Smallest legal value for this field."""
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        """Largest legal value for this field."""
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        """Current value of this field in *dateval*."""
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        """Return the earliest candidate any expression produces, or None."""
        best = None
        for expression in self.expressions:
            candidate = expression.get_next_value(dateval, self)
            if candidate is not None and (best is None or candidate < best):
                best = candidate
        return best

    def compile_expressions(self, exprs):
        """Parse a (possibly comma-separated) expression string."""
        self.expressions = []
        spec = str(exprs).strip()
        if ',' in spec:
            for part in spec.split(','):
                self.compile_expression(part)
        else:
            self.compile_expression(spec)

    def compile_expression(self, expr):
        """Match *expr* against each compiler's regex; first hit wins."""
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                self.expressions.append(compiler(**match.groupdict()))
                return
        raise ValueError('Unrecognized expression "%s" for field "%s"' %
                         (expr, self.name))

    def __str__(self):
        return ','.join(str(expression) for expression in self.expressions)

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.name,
                                   str(self))
class WeekField(BaseField):
    """Cron field for the ISO week number (derived, not a datetime attribute)."""
    REAL = False
    def get_value(self, dateval):
        # isocalendar() returns (year, week, weekday); week is element 1.
        return dateval.isocalendar()[1]
class DayOfMonthField(BaseField):
    """Day-of-month field; also accepts 'xth weekday' and 'last' expressions."""
    COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression,
                                       LastDayOfMonthExpression]
    def get_max(self, dateval):
        # Upper bound depends on the month (28-31 days).
        return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField):
    """Weekday field (0=Monday); derived via weekday(), also accepts names."""
    REAL = False
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
    def get_value(self, dateval):
        return dateval.weekday()
| gpl-3.0 |
netcon-source/OpenClimateGIS | src/openclimategis/util/raw_sql.py | 7 | 1311 | import os
from django.db import connection
def execute(sql):
    """Run *sql* on the default Django connection and return all rows.

    The cursor is always closed, even when execution raises.
    """
    cur = connection.cursor()
    try:
        cur.execute(sql)
        result = cur.fetchall()
    finally:
        cur.close()
    return result
def get_dataset(archive_id, variable_name, scenario_code, time_range, cm_code):
    """Build the SQL that selects dataset ids matching the given archive,
    climate model, variable, scenario and inclusive time range.

    NOTE(review): values are interpolated with str.format, so this is unsafe
    for untrusted input (SQL injection) -- callers must pass trusted values.
    """
    template = """
    with
    climatemodel as (
    select *
    from climatedata_climatemodel
    where archive_id = {archive_id} and
    code = '{cm_code}'
    ),
    variable as (
    select *
    from climatedata_variable
    where name = '{variable_name}'
    ),
    scenario as (
    select *
    from climatedata_scenario
    where code = '{scenario_code}'
    ),
    base_datasets as (
    select *
    from climatedata_dataset
    where climatemodel_id in (select id from climatemodel) and
    scenario_id in (select id from scenario)
    ),
    time as (
    select distinct dataset_id
    from climatedata_indextime
    where dataset_id in (select id from base_datasets) and
    value between '{lower}' and '{upper}'
    )
    select variable.dataset_id
    from variable,time
    where variable.dataset_id = time.dataset_id;
    """
    return template.format(archive_id=archive_id,
                           variable_name=variable_name,
                           scenario_code=scenario_code,
                           lower=time_range[0],
                           upper=time_range[1],
                           cm_code=cm_code)
Cuuuurzel/KiPyCalc | sympy/diffgeom/tests/test_function_diffgeom_book.py | 27 | 5286 | from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r
from sympy.diffgeom import (intcurve_series, intcurve_diffequ, Differential,
WedgeProduct)
from sympy.core import symbols, Function, Derivative
from sympy.simplify import trigsimp, simplify
from sympy.functions import sqrt, atan2, sin, cos
from sympy.matrices import Matrix
# Most of the functionality is covered in the
# test_functional_diffgeom_ch* tests which are based on the
# example from the paper of Sussman and Wisdom.
# If they do not cover something, additional tests are added in other test
# functions.
# From "Functional Differential Geometry" as of 2011
# by Sussman and Wisdom.
def test_functional_diffgeom_ch2():
    """Chapter 2 examples: rectangular <-> polar chart conversions on R2,
    Jacobians, and evaluating scalar fields at points given in either chart."""
    x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True)
    x, y = symbols('x, y', real=True)
    f = Function('f')
    assert (R2_p.point_to_coords(R2_r.point([x0, y0])) ==
            Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)]))
    assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) ==
            Matrix([r0*cos(theta0), r0*sin(theta0)]))
    assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix(
        [[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]])
    # A scalar field evaluates to the same value regardless of which chart
    # the point was constructed in.
    field = f(R2.x, R2.y)
    p1_in_rect = R2_r.point([x0, y0])
    p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)])
    assert field.rcall(p1_in_rect) == f(x0, y0)
    assert field.rcall(p1_in_polar) == f(x0, y0)
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])
    assert R2.x(p_r) == x0
    assert R2.x(p_p) == r0*cos(theta0)
    assert R2.r(p_p) == r0
    assert R2.r(p_r) == sqrt(x0**2 + y0**2)
    assert R2.theta(p_r) == atan2(y0, x0)
    h = R2.x*R2.r**2 + R2.y**3
    assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3
    assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0)
def test_functional_diffgeom_ch3():
    """Chapter 3 examples: vector fields acting on scalar fields, and the
    Taylor series of the integral curve of the rotation field on R2."""
    x0, y0 = symbols('x0, y0', real=True)
    x, y, t = symbols('x, y, t', real=True)
    f = Function('f')
    b1 = Function('b1')
    b2 = Function('b2')
    p_r = R2_r.point([x0, y0])
    s_field = f(R2.x, R2.y)
    v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y
    assert v_field.rcall(s_field).rcall(p_r).doit() == b1(
        x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0)
    assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0
    v = R2.e_x + 2*R2.e_y
    s = R2.r**2 + 3*R2.x
    assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3
    # The integral curve of the rotation field starting at (1, 0) is
    # (cos t, sin t); check its series coefficients term by term.
    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True)
    series_x, series_y = zip(*series)
    assert all(
        [term == cos(t).taylor_term(i, t) for i, term in enumerate(series_x)])
    assert all(
        [term == sin(t).taylor_term(i, t) for i, term in enumerate(series_y)])
def test_functional_diffgeom_ch4():
    """Chapter 4 examples: one-forms and differentials on R2, evaluated
    against basis vector fields in both rectangular and polar charts."""
    x0, y0, theta0 = symbols('x0, y0, theta0', real=True)
    x, y, r, theta = symbols('x, y, r, theta', real=True)
    r0 = symbols('r0', positive=True)
    f = Function('f')
    b1 = Function('b1')
    b2 = Function('b2')
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])
    f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy
    assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0)
    assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0)
    s_field_r = f(R2.x, R2.y)
    df = Differential(s_field_r)
    assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0)
    assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0)
    # Same differential expressed through polar coordinates.
    s_field_p = f(R2.r, R2.theta)
    df = Differential(s_field_p)
    assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == (
        cos(theta0)*Derivative(f(r0, theta0), r0) -
        sin(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == (
        sin(theta0)*Derivative(f(r0, theta0), r0) +
        cos(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    assert R2.dx(R2.e_x).rcall(p_r) == 1
    assert R2.dx(R2.e_x) == 1
    assert R2.dx(R2.e_y).rcall(p_r) == 0
    assert R2.dx(R2.e_y) == 0
    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    assert R2.dx(circ).rcall(p_r).doit() == -y0
    assert R2.dy(circ).rcall(p_r) == x0
    assert R2.dr(circ).rcall(p_r) == 0
    assert simplify(R2.dtheta(circ).rcall(p_r)) == 1
    assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0
def test_functional_diffgeom_ch6():
    """Chapter 6 examples: wedge products as determinants on R2/R3 and the
    exterior derivative of a generic one-form on R3."""
    u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True)
    u = u0*R2.e_x + u1*R2.e_y
    v = v0*R2.e_x + v1*R2.e_y
    wp = WedgeProduct(R2.dx, R2.dy)
    assert wp(u, v) == u0*v1 - u1*v0
    u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z
    v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z
    w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z
    wp = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz)
    # dx ^ dy ^ dz applied to three vectors is the 3x3 determinant.
    assert wp(
        u, v, w) == Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2]).det()
    a, b, c = symbols('a, b, c', cls=Function)
    a_f = a(R3_r.x, R3_r.y, R3_r.z)
    b_f = b(R3_r.x, R3_r.y, R3_r.z)
    c_f = c(R3_r.x, R3_r.y, R3_r.z)
    theta = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz
    dtheta = Differential(theta)
    da = Differential(a_f)
    db = Differential(b_f)
    dc = Differential(c_f)
    # d(theta) = da ^ dx + db ^ dy + dc ^ dz; the difference must vanish.
    expr = dtheta - WedgeProduct(
        da, R3_r.dx) - WedgeProduct(db, R3_r.dy) - WedgeProduct(dc, R3_r.dz)
    assert expr.rcall(R3_r.e_x, R3_r.e_y) == 0
| mit |
2014/qqweibo | tests/test_pyqqweibo.py | 4 | 27266 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# FileName : test_pyqqweibo.py
# Author : Feather.et.ELF <fledna@qq.com>
# Created : Wed Jun 08 10:20:57 2011 by Feather.et.ELF
# Copyright : Feather Workshop (c) 2011
# Description : testcast
# Time-stamp: <2011-06-09 22:18:47 andelf>
from __future__ import unicode_literals
from __future__ import print_function
import sys
import time
from random import randint
import unittest
sys.path.insert(0, '..')
from qqweibo import *
from qqweibo import models
def contenttype_tester(apifunc, reqnum, contenttype, **kwargs):
    """Fetch tweets through *apifunc* with a content-type filter and assert
    every returned tweet actually carries that content kind.

    contenttype is a bit flag: 1=text, 2=link, 4=image, 8=video, 0x10=music.
    Returns True when the checks ran; returns None when the flag is not one
    of the supported single values or the API returned nothing.
    """
    # contenttype: content filter
    # FIXME: type1 | type2 not supported
    if contenttype not in [1, 2, 4, 8, 0x10]:
        return
    ret = apifunc(reqnum=reqnum, contenttype=contenttype, **kwargs)
    if not ret:
        print ('No test for contenttype 0x%x' % contenttype)
        return
    if contenttype & 1:
        # Text
        for t in ret:
            assert bool(t.text)
    if contenttype & 2:
        # LINK
        for t in ret:
            # typically works, because all url will be translated
            # to http://url.cn/somewhat
            assert ('http://' in t.origtext) or \
                   (t.source and ('http://' in t.source.origtext))
    if contenttype & 4:
        # IMAGE
        for t in ret:
            assert t.image or (t.source and t.source.image)
    if contenttype & 8:
        # VIDEO
        # BUG: .video sometimes is None
        for t in ret:
            assert t.video or (t.source and t.source.video) or \
                   (('视频' in t.origtext) or \
                    (t.source and ('视频' in t.source.origtext)))
    if contenttype & 0x10:
        # MUSIC
        for t in ret:
            assert t.music or (t.source and t.source.music)
    return True
def test():
    """Trivial smoke test that always succeeds."""
    return None
def test_get_access_token():
    """Placeholder: the interactive OAuth flow needs a human-entered PIN.

    TODO: implement with a mocked verifier; the sketch below shows the
    intended steps.
    """
    pass
#assert access_token.key
#assert access_token.secret
#auth.get_authorization_url()
#print (a.get_authorization_url())
#verifier = raw_input('PIN: ').strip()
#access_token = a.get_access_token(verifier)
#token = access_token.key
#tokenSecret = access_token.secret
#print (access_token.key)
#print (access_token.secret)
#auth.setToken(token, tokenSecret)
class QWeiboTestCase(unittest.TestCase):
    """Base case for all live-API tests: builds an authenticated
    OAuthHandler from credentials stored in a local secret.py module."""
    @classmethod
    def setUpClass(cls):
        """generate OAuthHandler"""
        # secret.py is a developer-local module holding API and user tokens;
        # it is deliberately not imported at module scope.
        import secret
        auth = OAuthHandler(secret.apiKey, secret.apiSecret)
        token = secret.token
        tokenSecret = secret.tokenSecret
        auth.setToken(token, tokenSecret)
        cls.auth = auth
class MemoryCacheTestCase(QWeiboTestCase):
    """MemoryCache should serve an identical repeated request from memory."""
    def test_MemoryCache(self):
        """MemoryCache"""
        api = API(self.auth, cache=MemoryCache())
        ret = api.timeline.home(reqnum=100)
        startTime = time.time()
        ret2 = api.timeline.home(reqnum=100)
        endTime = time.time()
        self.assertEqual(ret[0].id, ret2[0].id)
        self.assertEqual(ret[-1].id, ret2[-1].id)
        # NOTE(review): timing-based check (cache hit assumed < 10 ms);
        # may be flaky on loaded machines.
        self.assertLess(endTime - startTime, 0.01)
class FileCacheTestCase(QWeiboTestCase):
    """FileCache should serve an identical repeated request from disk."""

    def setUp(self):
        #super(FileCacheTestCase, self).setUp()
        import tempfile
        self.tmpdir = tempfile.mkdtemp()

    def test_FileCache(self):
        """FileCache"""
        api = API(self.auth, cache=FileCache(self.tmpdir))
        ret = api.timeline.public(reqnum=100)
        startTime = time.time()
        ret2 = api.timeline.public(reqnum=100)
        endTime = time.time()
        self.assertEqual(ret[0].id, ret2[0].id)
        self.assertEqual(ret[-1].id, ret2[-1].id)
        # NOTE(review): timing-based cache check; may be flaky on slow disks.
        self.assertLess(endTime - startTime, 0.1)

    def tearDown(self):
        # Bug fix: this was 'def teardown():' -- unittest never calls that
        # name, the method was missing 'self', and the body referenced 'self'
        # so it would have raised NameError if invoked; the temp dir leaked.
        import shutil
        shutil.rmtree(self.tmpdir)
class RarserTestCase(QWeiboTestCase):
    """Checks each response parser (XML raw/DOM/ElementTree, Model, JSON).

    NOTE(review): the name is presumably a typo for "ParserTestCase"; left
    unchanged because test selection by name may reference it.
    """
    def test_XMLRawParser(self):
        """XMLRawParser"""
        import xml.dom.minidom
        api = API(self.auth, parser=XMLRawParser())
        ret = api.info.update()
        assert len(ret) > 0
        # Raw parser returns the response body; it must be well-formed XML.
        xml.dom.minidom.parseString(ret)
    def test_XMLDomParser(self):
        """XMLDomParser"""
        api = API(self.auth, parser=XMLDomParser())
        ret = api.user.userinfo('andelf')
        assert hasattr(ret, 'getElementsByTagName')
        assert len(ret.getElementsByTagName('nick')) == 1
    def test_XMLETreeParser(self):
        """XMLETreeParser"""
        api = API(self.auth, parser=XMLETreeParser())
        ret = api.user.userinfo('andelf')
        assert hasattr(ret, 'findtext')
        assert ret.findtext('data/nick')
    def test_ModelParser(self):
        """ModelParser"""
        from qqweibo.models import User
        api = API(self.auth, parser=ModelParser())
        ret = api.user.userinfo('andelf')
        assert type(ret) == User
        assert hasattr(ret, 'name')
        # ModelParser is also the default parser.
        api = API(self.auth)
        ret = api.user.userinfo('andelf')
        assert type(ret) == User
    def test_JSONParser(self):
        """JSONParser"""
        api = API(self.auth, parser=JSONParser())
        ret = api.user.userinfo('andelf')
        assert 'msg' in ret
        assert ret['msg'] == 'ok'
        assert 'data' in ret
        assert 'name' in ret['data']
# === API test ===
class APITestCase(QWeiboTestCase):
    """Adds a shared, default-configured API client on top of the auth base."""
    @classmethod
    def setUpClass(cls):
        super(APITestCase, cls).setUpClass()
        cls.api = API(cls.auth)
class TimelineAPITestCase(APITestCase):
    """Exercises the api.timeline.* endpoints against the live QQ weibo
    service.

    These are integration tests: results depend on live data, the account's
    follow list, and documented server-side quirks (see the inline BUG notes,
    e.g. reqnum caps of 70/100/210/300 per endpoint).
    """
    def test_home(self):
        """api.timeline.home"""
        api = self.api
        ret = api.timeline.home()
        assert isinstance(ret, list)
        assert len(ret) <= 20
        if len(ret) > 1:
            assert isinstance(ret[0], models.Tweet)
        for ct in [1, 2, 4, 8, 0x10]:
            contenttype_tester(api.timeline.home,
                               reqnum=1,
                               contenttype=ct)
        ret = api.timeline.home(reqnum=100)
        assert len(ret) == 70
        assert ret.hasnext
        num = randint(1, 70)
        ret = api.timeline.home(reqnum=num)
        assert len(ret) == num
        assert ret.hasnext
    def test_public(self):
        """api.timeline.public"""
        api = self.api
        ret = api.timeline.public()
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        ret = api.timeline.public()
        assert len(ret) == 20
        ret = api.timeline.public(reqnum=130)
        assert len(ret) == 100
    def test_user(self):
        """api.timeline.user"""
        api = self.api
        ret = api.timeline.user('andelf')
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        assert ret[0].name == 'andelf'
        assert ret.hasnext
        for ct in [1, 2, 4, 8, 0x10]:
            contenttype_tester(api.timeline.user,
                               reqnum=1,
                               contenttype=ct,
                               name='andelf')
        ret = api.timeline.user(name='andelf', reqnum=120)
        assert len(ret) == 70
        assert ret.hasnext
        num = randint(1, 70)
        ret = api.timeline.user(name='andelf', reqnum=num)
        assert len(ret) == num
        assert ret.hasnext
    def test_mentions(self):
        """api.timeline.mentions"""
        api = self.api
        ret = api.timeline.mentions()
        username = self.auth.get_username()
        assert 1 < len(ret) <= 20
        assert type(ret[0]) == models.Tweet
        # ugly but works
        # BUG: it also returns retweets of my tweet, no @myusername
        assert (username in ret[0].origtext + ret[0].name) or \
               (ret[0].source and (username in \
                ret[0].source.origtext + ret[0].source.name))
        for ct in [1, 2, 4, 8, 0x10]:
            contenttype_tester(api.timeline.mentions,
                               reqnum=1,
                               contenttype=ct)
        ret = api.timeline.mentions(reqnum=120)
        assert len(ret) == 70
        assert ret.hasnext
        ret = api.timeline.mentions(reqnum=64)
        assert len(ret) == 64
        assert ret.hasnext
    def test_topic(self):
        """api.timeline.topic"""
        api = self.api
        ret = api.timeline.topic(httext='这里是辽宁')
        # BUG: 默认为 20, 但大部分情况下即使热门话题, 返回都会少一些
        assert len(ret) <= 20
        assert type(ret[0]) == models.Tweet
        assert '这里是辽宁' in ret[0].origtext
        # BUG: hasnext = 2 not 0
        assert ret.hasnext
        for reqnum in [120, randint(1, 100), randint(1, 100)]:
            ret = api.timeline.topic(httext='毕业', reqnum=reqnum)
            # BUG: this will range from 90 or so to 100
            assert len(ret) <= 100
            # BUG: generally return count will be 0-10 less than reqnum
            assert len(ret) <= reqnum
            assert ret.hasnext
            # NOTE: I don't know why, ask tencent please
    def test_broadcast(self):
        """api.timeline.broadcast"""
        api = self.api
        username = self.auth.get_username()
        ret = api.timeline.broadcast()
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        assert username == ret[0].name
        for ct in [1, 2, 4, 8, 0x10]:
            contenttype_tester(api.timeline.broadcast,
                               reqnum=1,
                               contenttype=ct)
        ret = api.timeline.broadcast(reqnum=110)
        assert len(ret) == 70
        num = randint(1, 70)
        ret = api.timeline.broadcast(reqnum=num)
        assert len(ret) == num
    def test_special(self):
        """api.timeline.special"""
        api = self.api
        ret = api.timeline.special()
        assert 1 <= len(ret) <= 20, 'You should add special listen ' \
               'friends to pass this test'
        assert type(ret[0]) == models.Tweet
        ret = api.timeline.special(reqnum=110)
        assert len(ret) == 70
        num = randint(1, 70)
        ret = api.timeline.special(reqnum=num)
        assert len(ret) == num
    def test_area(self):
        """api.timeline.area"""
        api = self.api
        ret = api.timeline.area(country=1, province=44, city=3)
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        assert int(ret[0].countrycode) == 1
        assert int(ret[0].provincecode) == 44
        assert int(ret[0].citycode) == 3
        ret = api.timeline.area(country=1, province=44, city=3, reqnum=110)
        assert len(ret) == 100
        num = randint(1, 100)
        ret = api.timeline.area(country=1, province=44, city=3, reqnum=num)
        assert len(ret) == num
    def test_users(self):
        """api.timeline.users"""
        api = self.api
        ret = api.timeline.users(names=['andelf', 'NBA'])
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        assert ret[0].name in ['andelf', 'NBA']
        for ct in [1, 2, 4, 8, 0x10]:
            contenttype_tester(api.timeline.users,
                               reqnum=1,
                               contenttype=ct,
                               names=['andelf', 'yinyuetai'])
        # BUG: max reqnum is 40, or Exception raised
        # Update Wed Jun 08 14:35:33 2011:
        # seems fixed
        # Update Wed Jun 08 15:06:24 2011
        # bug again.... 囧rz..
        ret = api.timeline.users(names=['andelf', 'NBA'], reqnum=100)
        assert len(ret) == 70
        num = randint(1, 70)
        ret = api.timeline.users(names=['andelf', 'NBA'], reqnum=num)
        assert len(ret) == num
    def test_homeids(self):
        """api.timeline.homeids"""
        api = self.api
        ret = api.timeline.homeids()
        assert len(ret) == 20
        assert type(ret[0]) == models.RetId
        assert hasattr(ret[0], 'id')
        assert hasattr(ret[0], 'timestamp')
        ret = api.timeline.homeids(reqnum=310)
        assert len(ret) == 300
        num = randint(1, 300)
        ret = api.timeline.homeids(reqnum=num)
        assert len(ret) == num
    def test_userids(self):
        """api.timeline.userids"""
        api = self.api
        ret = api.timeline.userids('andelf')
        assert len(ret) == 20
        assert type(ret[0]) == models.RetId
        assert hasattr(ret[0], 'id')
        assert hasattr(ret[0], 'timestamp')
        # use 腾讯薇薇
        # BUG: return count is less than reqnum
        # and it is not a linear relation..... max 210
        # for e.g. :
        # 60 => 60, 70 => 70, 80 => 70, 90 => 70, 100 => 70
        # 110 => 80, 120 => 90, ... 260 => 200, 181 => 140
        # 141 => 111
        ret = api.timeline.userids(name='t', reqnum=300)
        assert len(ret) == 210
        num = randint(1, 210)
        ret = api.timeline.userids(name='t', reqnum=num)
        assert len(ret) <= num
    def test_broadcastids(self):
        """api.timeline.broadcastids"""
        api = self.api
        ret = api.timeline.broadcastids()
        assert len(ret) == 20
        assert type(ret[0]) == models.RetId
        assert hasattr(ret[0], 'id')
        assert hasattr(ret[0], 'timestamp')
        # BUG: same bug as api.timeline.userids
        ret = api.timeline.broadcastids(reqnum=310)
        assert len(ret) == 210
        num = randint(1, 300)
        ret = api.timeline.broadcastids(reqnum=num)
        assert len(ret) <= num
    def test_mentionsids(self):
        """api.timeline.mentionsids"""
        api = self.api
        ret = api.timeline.mentionsids()
        assert len(ret) == 20
        assert type(ret[0]) == models.RetId
        assert hasattr(ret[0], 'id')
        assert hasattr(ret[0], 'timestamp')
        # BUG: same bug as api.timestamp.userids
        ret = api.timeline.mentionsids(reqnum=300)
        assert len(ret) <= 210
        num = randint(1, 300)
        ret = api.timeline.mentionsids(reqnum=num)
        assert len(ret) <= num
    def test_usersids(self):
        """api.timeline.usersids"""
        api = self.api
        ret = api.timeline.usersids(['andelf', 't', 'NBA'])
        assert len(ret) == 20
        assert type(ret[0]) == models.RetId
        assert hasattr(ret[0], 'id')
        assert hasattr(ret[0], 'timestamp')
        ret = api.timeline.usersids(names=['andelf', 't', 'NBA'], reqnum=310)
        assert len(ret) == 300
        num = randint(1, 300)
        ret = api.timeline.usersids(names=['andelf', 't', 'NBA'], reqnum=num)
        assert len(ret) == num
# part 2
# Shared mutable state: ids of tweets created during the run, so later tests
# can retweet/reply/comment on and finally delete them (tests are
# order-dependent because of this).
test_ids = []
class TweetAPITestCase(APITestCase):
    """Tests for the api.tweet.* endpoints.

    NOTE(review): these run against the live service; every tweet created
    here has its id appended to the module-level ``test_ids`` list so the
    ``__main__`` block can delete it afterwards. Methods that read
    ``test_ids[0]`` therefore depend on ``test_add`` having run first.
    """
    def test_show(self):
        """api.tweet.show"""
        api = self.api
        # Use the newest tweet on the home timeline as a known-good id.
        id = api.timeline.homeids(reqnum=1)[0].id
        ret = api.tweet.show(id)
        assert type(ret) == models.Tweet
        assert ret.id == id
    def test_add(self):
        """api.tweet.add"""
        api = self.api
        ret = api.tweet.add("#pyqqweibo# unittest auto post."
                            "will be delete later %d" % randint(0, 100),
                            clientip='127.0.0.1',
                            jing=123.422889,
                            wei=41.76627
                            )
        assert type(ret) == models.RetId
        assert hasattr(ret, 'id')
        assert hasattr(ret, 'timestamp')
        test_ids.append(ret.id)
        t = ret.as_tweet() # also show
        assert t.id == ret.id
        assert 'pyqqweibo' in t.origtext
        # type 1 == original tweet
        assert t.type == 1
        # not implemented yet
        assert not bool(t.geo)
    def test_delete(self):
        """api.tweet.delete"""
        # delete in others
        pass
    def test_retweet(self):
        """api.tweet.retweet"""
        api = self.api
        target_id = test_ids[0]
        ret = api.tweet.retweet(reid=target_id,
                                content="test retweet %d" % randint(0, 100),
                                clientip='127.0.0.1'
                                )
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert t.id == ret.id
        assert t.source.id == target_id
        # type 2 == retweet
        assert t.type == 2
        assert 'retweet' in t.origtext
    def test_reply(self):
        """api.tweet.reply"""
        api = self.api
        target_id = test_ids[0]
        ret = api.tweet.reply(reid=target_id,
                              content="测试回复 %d" % randint(0, 100),
                              clientip='127.0.0.1'
                              )
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert t.id == ret.id
        assert t.source.id == target_id
        # type 4 == reply
        assert t.type == 4
        assert '回复' in t.origtext
    def test_addpic(self):
        """api.tweet.addpic"""
        api = self.api
        # NOTE(review): depends on a local file f:/tutu.jpg being present.
        ret = api.tweet.addpic("f:/tutu.jpg",
                               "TOO~~~",
                               '127.0.0.1')
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert hasattr(t, 'image')
        assert len(t.image) == 1
        assert 'TOO' in t.origtext
    def test_retweetcount(self):
        """api.tweet.retweetcount"""
        api = self.api
        ret = api.tweet.retweetcount(ids=[79504073889068,
                                          36575045593232])
        assert type(ret) == models.JSON
        data = ret.as_dict()
        assert '79504073889068' in data
        count = data['79504073889068']
        assert count > 0
        ret0 = api.tweet.retweetcount(ids=79504073889068,
                                      flag=0)
        count0 = ret0.as_dict()['79504073889068']
        assert count0 > 0
        # counts may drift between the two calls; allow a small delta
        # in some seconds
        assert count0 - 10 <= count <= count0
        ret1 = api.tweet.retweetcount(ids=79504073889068,
                                      flag=1)
        count1 = ret1.as_dict()['79504073889068']
        assert count1 > 0
        ret2 = api.tweet.retweetcount(ids=79504073889068,
                                      flag=2)
        count2 = ret2.as_dict()['79504073889068']
        # {u'count': 16511, u'mcount': 294}
        assert 'count' in count2
        assert 'mcount' in count2
        assert count2['count'] - 5 <= count <= count2['count']
        assert count2['mcount'] - 5 <= count1 <= count2['mcount']
    def test_retweetlist(self):
        """api.tweet.retweetlist"""
        api = self.api
        ret = api.tweet.retweetlist(rootid='79504073889068')
        assert len(ret) == 20
        assert type(ret[0]) == models.Tweet
        assert ret[0].source.id == '79504073889068'
        assert ret.hasnext
        # reqnum above 100 is clamped to 100 (asserted below)
        ret = api.tweet.retweetlist(rootid='79504073889068',
                                    reqnum=120)
        assert len(ret) == 100
        num = randint(1, 100)
        ret = api.tweet.retweetlist(rootid='79504073889068',
                                    reqnum=num)
        assert len(ret) == num
    def test_comment(self):
        """api.tweet.comment"""
        api = self.api
        target_id = test_ids[0]
        ret = api.tweet.comment(reid=target_id,
                                content="测试评论 %d" % randint(0, 100),
                                clientip='127.0.0.1'
                                )
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert t.id == ret.id
        assert t.source.id == target_id
        # type 7 == comment
        assert t.type == 7
        assert '评论' in t.origtext
    def test_addmusic(self):
        """api.tweet.addmusic"""
        # NOTE(review): disabled -- this early return skips the body below.
        return
        api = self.api
        ret = api.tweet.addmusic(url='',
                                 title='',
                                 author='',
                                 content='Song',
                                 clientip='127.0.0.1')
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert hasattr(t, 'music')
        assert bool(t.music)
        assert 'Song' in t.origtext
    def test_addvideo(self):
        """api.tweet.addvideo"""
        # NOTE(review): disabled -- this early return skips the body below.
        return
        api = self.api
        ret = api.tweet.addvideo(url='',
                                 content='Video',
                                 clientip='127.0.0.1')
        assert type(ret) == models.RetId
        test_ids.append(ret.id)
        t = ret.as_tweet()
        assert hasattr(t, 'video')
        assert bool(t.video)
        assert type(t.video) == models.Video
        assert 'Video' in t.origtext
    def test_list(self):
        """api.tweet.list"""
        api = self.api
        ret = api.tweet.list(ids=[79504073889068,
                                  36575045593232])
        assert len(ret) == 2
        assert type(ret[0]) == models.Tweet
        assert not ret.hasnext
        for t in ret:
            assert t.id in ['79504073889068', '36575045593232']
class UserAPITestCase(APITestCase):
    """Tests for the api.user.* endpoints (run against the live service)."""
    def test_info(self):
        """api.user.info"""
        api = self.api
        ret = api.user.info()
        assert type(ret) == models.User
    def test_update(self):
        """api.user.update"""
        api = self.api
        ret = api.user.info()
        # Remember the original introduction so it can be restored below.
        old_intro = ret.introduction
        ret.introduction = '#pyqqweibo# powered!'
        ret.update() # use model interface
        ret = api.user.info()
        assert ret.introduction == '#pyqqweibo# powered!'
        ret.introduction = old_intro
        ret.update()
    def test_updatehead(self):
        """api.user.updatehead"""
        # TODO: implement this
        api = self.api
    def test_userinfo(self):
        """api.user.userinfo"""
        api = self.api
        ret = api.user.userinfo(name='t')
        assert type(ret) == models.User
        assert ret.name == 't'
class FriendsAPITestCase(APITestCase):
    """Tests for the api.friends.* endpoints.

    NOTE(review): these mutate the test account's relations (follow,
    unfollow, blacklist, special list) and assume the accounts 't',
    'andelf', 'fledna' and 'NBA' exist on the service.
    """
    def test_fanslist(self):
        """api.friends.fanslist"""
        api = self.api
        ret = api.friends.fanslist()
        assert len(ret) == 30
        assert type(ret[0]) == models.User
        assert ret.hasnext
        fansnum = api.user.info().fansnum
        ret = api.friends.fanslist(startindex=fansnum-1)
        assert not ret.hasnext
        # reqnum above 30 is clamped to 30 (asserted below)
        ret = api.friends.fanslist(reqnum=100)
        assert len(ret) == 30
        num = randint(1, 30)
        ret = api.friends.fanslist(reqnum=num)
        assert len(ret) == num
    def test_idollist(self):
        """api.friends.idollist"""
        api = self.api
        ret = api.friends.idollist()
        assert len(ret) == 30
        assert type(ret[0]) == models.User
        assert ret.hasnext
        idolnum = api.user.info().idolnum
        ret = api.friends.idollist(startindex=idolnum-1)
        assert not ret.hasnext
        ret = api.friends.idollist(reqnum=100)
        assert len(ret) == 30
        num = randint(1, 30)
        ret = api.friends.idollist(reqnum=num)
        assert len(ret) == num
    def test_blacklist(self):
        """api.friends.blacklist"""
        api = self.api
        ret = api.friends.blacklist()
        assert len(ret) > 0, "add someone to blacklist to pass test"
        assert type(ret[0]) == models.User
    def test_speciallist(self):
        """api.friends.speciallist"""
        api = self.api
        ret = api.friends.speciallist()
        assert len(ret) > 0, "add someone to special list to pass test"
        assert type(ret[0]) == models.User
    def test_add(self):
        """api.friends.add"""
        api = self.api
        ret = api.friends.add(name='fledna')
        assert ret is None
        info = api.user.userinfo(name='fledna')
        assert info.ismyidol
    def test_delete(self):
        """api.friends.delete"""
        api = self.api
        ret = api.friends.delete(name='t')
        assert ret is None
        info = api.user.userinfo(name='t')
        assert not info.ismyidol
        # Best-effort re-follow so later tests still see 't' as an idol.
        try:
            # BUG: will cause errcode=65. reason unkown
            api.friends.add(name='t')
        except:
            pass
    def test_addspecial(self):
        """api.friends.addspecial"""
        api = self.api
        ret = api.friends.addspecial('t')
        assert ret is None
    def test_deletespecial(self):
        """api.friends.deletespecial"""
        api = self.api
        # Best-effort setup: ensure 't' is followed and on the special list
        # before removing it again.
        try:
            ret = api.friends.add('t')
            ret = api.friends.addspecial('t')
        except:
            pass
        ret = api.friends.deletespecial('t')
        assert ret is None
    def test_addblacklist(self):
        """api.friends.addblacklist"""
        api = self.api
        ret = api.friends.addblacklist(name='t')
        assert ret is None
        info = api.user.userinfo(name='t')
        assert info.ismyblack
    def test_deleteblacklist(self):
        """api.friends.deleteblacklist"""
        api = self.api
        ret = api.friends.deleteblacklist(name='t')
        assert ret is None
        info = api.user.userinfo(name='t')
        assert not info.ismyblack
    def test_check(self):
        """api.friends.check"""
        api = self.api
        ret = api.friends.check(names=['t', 'andelf', 'NBA'])
        assert type(ret) == models.JSON
        assert type(ret.t) == bool
        assert type(ret.as_dict()['andelf']) == bool
    def test_userfanslist(self):
        """api.friends.userfanslist"""
        api = self.api
        ret = api.friends.userfanslist('NBA')
        assert len(ret) == 30
        assert type(ret[0]) == models.User
        assert ret.hasnext
        # BUG: if too large, cause ret=4, errcode=0
        fansnum = api.user.userinfo('NBA').fansnum
        ret = api.friends.userfanslist('NBA', startindex=fansnum-1)
        assert not ret.hasnext
        ret = api.friends.userfanslist('NBA', reqnum=100)
        assert len(ret) == 30
        num = randint(1, 30)
        ret = api.friends.userfanslist('NBA', reqnum=num)
        assert len(ret) == num
    def test_useridollist(self):
        """api.friends.useridollist"""
        api = self.api
        ret = api.friends.useridollist('andelf')
        assert len(ret) == 30
        assert type(ret[0]) == models.User
        assert ret.hasnext
        idolnum = api.user.userinfo('andelf').idolnum
        ret = api.friends.useridollist('andelf', startindex=idolnum-1)
        assert not ret.hasnext
        ret = api.friends.useridollist('andelf', reqnum=100)
        assert len(ret) == 30
        num = randint(1, 30)
        ret = api.friends.useridollist('andelf', reqnum=num)
        assert len(ret) == num
    def test_userspeciallist(self):
        """api.friends.userspeciallist"""
        api = self.api
        # NOTE(review): this calls useridollist, not userspeciallist --
        # probably a copy/paste slip; confirm the intended endpoint.
        ret = api.friends.useridollist('andelf')
        assert len(ret) > 0
        assert type(ret[0]) == models.User
        if len(ret)< 30:
            assert not ret.hasnext
if __name__ == '__main__':
    unittest.main(verbosity=2)
    # NOTE(review): unittest.main() calls sys.exit() by default, so the
    # cleanup below is unreachable unless main() is invoked with exit=False.
    #suite = unittest.TestLoader().loadTestsFromTestCase(FriendsAPITestCase)
    #unittest.TextTestRunner(verbosity=2).run(suite)
    if 1:
        print ('\nbegin clean up...')
        # Re-authenticate, then delete every tweet the tests created.
        APITestCase.setUpClass()
        for i in test_ids:
            ret = APITestCase.api.tweet.delete(i)
            print ('delete id={}'.format(i))
            assert ret.id == i
| mit |
shizeeg/pyicqt | tools/migrate.py | 1 | 5978 | #!/usr/bin/env python
#
# Spool Migration Script
#
# This script takes two arguments. The first is either "dump" or "restore".
# The second argument will be a file that your xdb will be dumped to, in the
# case of a "dump", or restored from, in the case of a "restore". The
# spool config used will be what is in config.xml in the root of the
# distribution. This script is expected to be run from this directory.
#
# For example, if you are currently using the xmlfiles xdb backend, you
# would first have a config.xml file that is configured for that. You would
# then type './migrate.py dump mydump'. This will produce a long-ish file
# in XML format that contains all of the data from your spool.
#
# Next, lets say you wanted to switch to the MySQL xdb backend. You would
# first make sure that you have it set up correctly as per the instructions.
# (you would have had to create the tables using db-setup.mysql in this
# directory) Then you would set up your config.xml appropriately and
# run './migrate restore mydump'. This will import the xdb roster into
# your new spool.
#
# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
# A restore -will- write over entries from your current spool.
# Please make sure to make a backup if you wish to do so.
# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
#
# This script accepts a subset of the command line flags that the transport
# itself accepts. Please run it with '-h' to see the available options.
#
transportname = "PyICQt"  # must match the "transport" attribute in dump files
dumpversion = "1.0.1"     # version stamp written into dump files
import sys
# Python 2 only: reload() re-exposes setdefaultencoding (removed by site.py)
# so spool data containing non-ASCII text survives the dump/restore round trip.
reload(sys)
sys.setdefaultencoding('utf-8')
del sys.setdefaultencoding
sys.path.append("../src")  # import the transport's own modules from ../src
import debug
import getopt
import config
import utils
def showhelp():
    # Print usage information and exit (Python 2 print statements).
    print "./migrate.py [options] cmd file"
    print "options:"
    print " -h print this help"
    print " -c <file> read configuration from this file"
    print " -o <var>=<setting> set config var to setting"
    print " -d print debugging output"
    print "cmd:";
    print " dump dump spool to file"
    print " restore restore spool from file"
    sys.exit(0)
conffile = "config.xml"
options = {}
# Parse the command line (a subset of the transport's own flags).
opts, args = getopt.getopt(sys.argv[1:], "c:do:h", ["config=", "debug", "option=", "help"])
for o, v in opts:
    if o in ("-c", "--config"):
        conffile = v
    elif o in ("-d", "--debug"):
        config.debugOn = True
    elif o in ("-o", "--option"):
        var, setting = v.split("=", 2)
        options[var] = setting
    elif o in ("-h", "--help"):
        showhelp()
# Re-initialise the debug module now that config.debugOn is known.
reload(debug)
if len(args) != 2:
    showhelp()
import xmlconfig
xmlconfig.Import(conffile, options)
from twisted.words.xish.domish import Element
if args[0] == "dump":
    # Dump: walk every registered JID in the configured xdb spool and
    # serialise its registration, settings, x-statuses and contact lists
    # into one XML document written to args[1].
    import xdb
    from tlib import oscar
    myxdb = xdb.XDB(config.jid)
    out = Element((None, "pydump"))
    out["transport"] = transportname
    out["version"] = dumpversion
    for jid in myxdb.getRegistrationList():
        print "Dumping "+jid+"..."
        userpass = myxdb.getRegistration(jid)
        if not userpass: continue
        user = out.addElement("user")
        user["jid"] = jid
        user["username"] = userpass[0]
        user["password"] = userpass[1]
        # Plain preferences: one child element per setting name.
        prefs = user.addElement("preferences")
        settinglist = myxdb.getSettingList(jid)
        if settinglist:
            for pref in settinglist:
                thispref = prefs.addElement(pref)
                thispref.addContent(settinglist[pref])
        # Client preferences: <item variable="..."> entries.
        cprefs = user.addElement("cpreferences")
        csettinglist = myxdb.getCSettingList(jid)
        if csettinglist:
            for pref in csettinglist:
                thispref = cprefs.addElement("item")
                thispref.attributes['variable'] = pref
                thispref.addContent(csettinglist[pref])
        # Extended statuses: only slots with a title or description are kept.
        xstatuses = user.addElement("xstatuses")
        for i in range(0, len(oscar.X_STATUS_CAPS)):
            title, desc = myxdb.getXstatusText(jid, i)
            if title or desc:
                xstatus = xstatuses.addElement("item")
                xstatus.attributes['number'] = str(i)
                if title:
                    xstatus.attributes['title'] = str(title)
                else:
                    xstatus.attributes['title'] = ''
                if desc:
                    xstatus.addContent(str(desc))
                else:
                    xstatus.addContent('')
        # Contact lists (e.g. roster), one <list type="..."> per list type.
        listtypes = myxdb.getListTypes(jid)
        if listtypes:
            for listtype in listtypes:
                list = user.addElement("list")
                list["type"] = listtype
                listentries = myxdb.getList(listtype, jid)
                if not listentries: continue
                for entry in listentries:
                    listentry = list.addElement("entry")
                    listentry["name"] = entry[0]
                    attrs = entry[1]
                    for attr in attrs:
                        entryattr = listentry.addElement(attr)
                        entryattr.addContent(attrs[attr])
    f = open(args[1], "w")
    f.write(out.toXml())
    f.close()
elif args[0] == "restore":
    # Restore: parse the dump file and write every user back into the
    # (possibly different) xdb backend configured in config.xml.
    # WARNING: overwrites existing registrations for the same JIDs.
    import xdb
    myxdb = xdb.XDB(config.jid)
    input = utils.parseFile(args[1])
    if input.getAttribute("transport") != transportname:
        print "The dump file specified does not appear to be for this transport."
        sys.exit(0)
    for child in input.elements():
        jid = child.getAttribute("jid")
        print "Restoring "+jid+"..."
        doesexist = myxdb.getRegistration(jid)
        if doesexist:
            myxdb.removeRegistration(jid)
        username = child.getAttribute("username")
        password = child.getAttribute("password")
        myxdb.setRegistration(jid, username, password)
        for child2 in child.elements():
            if child2.name == "preferences":
                for pref in child2.elements():
                    # NOTE(review): passes the Element itself (not pref.name)
                    # as the setting name -- presumably xdb coerces it;
                    # compare the cpreferences branch below. Confirm.
                    myxdb.setSetting(jid, pref, pref.__str__())
            elif child2.name == "list":
                type = child2.getAttribute("type")
                for entry in child2.elements():
                    name = entry.getAttribute("name")
                    attrs = {}
                    for attr in entry.elements():
                        attrs[attr.name] = attr.__str__()
                    myxdb.setListEntry(type, jid, name, payload=attrs)
            elif child2.name == "xstatuses":
                for pref in child2.elements():
                    if pref.name == 'item':
                        myxdb.setXstatusText(jid, pref.getAttribute('number'), pref.getAttribute('title'), pref.__str__())
            elif child2.name == "cpreferences":
                for pref in child2.elements():
                    if pref.name == 'item':
                        myxdb.setCSetting(jid, pref.getAttribute('variable'), pref.__str__())
else:
    showhelp()
| gpl-2.0 |
jmartinezchaine/OpenERP | openerp/pychart/afm/Courier_BoldOblique.py | 15 | 1518 | # -*- coding: utf-8 -*-
# AFM font Courier-BoldOblique (path: /usr/share/fonts/afms/adobe/pcrbo8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostscript source code.
import dir
# Glyph-width table (1/1000 em units) indexed by character code. Courier is
# fixed pitch: printable glyphs are 600 units, unmapped codes default to 500.
dir.afm["Courier-BoldOblique"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 600, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 500, 600, 500, 500, 500, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 600, 500, 500, 500, 600, 500, 500, 600, 600, 600, 600, )
| agpl-3.0 |
jonathanslenders/asyncssh | examples/simple_keyed_server.py | 2 | 1973 | #!/usr/bin/env python3.4
#
# Copyright (c) 2013-2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
# To run this program, the file ``ssh_host_key`` must exist with an SSH
# private key in it to use as a server host key. An SSH host certificate
# can optionally be provided in the file ``ssh_host_key-cert.pub``.
#
# Authentication requires the directory authorized_keys to exist with
# files in it named based on the username containing the client keys
# and certificate authority keys which are accepted for that user.
import asyncio, asyncssh, sys
class MySSHServerSession(asyncssh.SSHServerSession):
    """Session that greets the authenticated user and exits immediately."""

    def connection_made(self, chan):
        """Remember the channel so later callbacks can write to it."""
        self._chan = chan

    def shell_requested(self):
        """Accept every interactive shell request."""
        return True

    def session_started(self):
        """Send the greeting, then close the channel with exit status 0."""
        username = self._chan.get_extra_info('username')
        self._chan.write('Welcome to my SSH server, %s!\r\n' % username)
        self._chan.exit(0)
class MySSHServer(asyncssh.SSHServer):
    """Server that authorizes clients from per-user authorized_keys files."""

    def connection_made(self, conn):
        """Keep a reference to the connection for use during authentication."""
        self._conn = conn

    def begin_auth(self, username):
        """Load ``authorized_keys/<username>``; always require key auth."""
        keyfile = 'authorized_keys/%s' % username
        try:
            self._conn.set_authorized_keys(keyfile)
        except IOError:
            # No key file for this user: leave no keys authorized.
            pass
        return True

    def session_requested(self):
        """Start a fresh greeting session for each channel."""
        return MySSHServerSession()
@asyncio.coroutine
def start_server():
    # Legacy (pre-async/await) coroutine: listen on all interfaces, port
    # 8022, using ssh_host_key (plus ssh_host_key-cert.pub, if present,
    # per the header comment) as the server host key.
    yield from asyncssh.create_server(MySSHServer, '', 8022,
                                      server_host_keys=['ssh_host_key'])
try:
loop.run_until_complete(start_server())
except (OSError, asyncssh.Error) as exc:
sys.exit('Error starting server: ' + str(exc))
loop.run_forever()
| epl-1.0 |
OmnInfinity/volatility | volatility/plugins/volshell.py | 44 | 19585 | # Volatility
# Copyright (C) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
import struct
import sys
import volatility.plugins.common as common
import volatility.win32 as win32
import volatility.utils as utils
import volatility.obj as obj
try:
import distorm3 #pylint: disable-msg=F0401
except ImportError:
pass
class volshell(common.AbstractWindowsCommand):
    """Shell in the memory image"""
    # Declare meta information associated with this plugin
    meta_info = {}
    meta_info['author'] = 'Brendan Dolan-Gavitt'
    meta_info['copyright'] = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt'
    meta_info['contact'] = 'bdolangavitt@wesleyan.edu'
    meta_info['license'] = 'GNU General Public License 2.0'
    meta_info['url'] = 'http://moyix.blogspot.com/'
    meta_info['os'] = 'WIN_32_XP_SP2'
    meta_info['version'] = '1.3'
    def __init__(self, config, *args, **kwargs):
        """Register the options that pick the initial process context."""
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
        config.add_option('OFFSET', short_option = 'o', default = None,
                          help = 'EPROCESS Offset (in hex) in kernel address space',
                          action = 'store', type = 'int')
        config.add_option('IMNAME', short_option = 'n', default = None,
                          help = 'Operate on this Process name',
                          action = 'store', type = 'str')
        config.add_option('PID', short_option = 'p', default = None,
                          help = 'Operate on these Process IDs (comma-separated)',
                          action = 'store', type = 'str')
        # Current shell context: kernel address space and selected _EPROCESS.
        self.addrspace = None
        self.proc = None
    def getpidlist(self):
        # Enumerate _EPROCESS objects from the kernel's process list.
        return win32.tasks.pslist(self.addrspace)
    def getmodules(self):
        # Enumerate the kernel's loaded-module list.
        return win32.modules.lsmod(self.addrspace)
    def context_display(self):
        # One-line summary of the currently selected process.
        print "Current context: process {0}, pid={1}, ppid={2} DTB={3:#x}".format(self.proc.ImageFileName,
                                                                                  self.proc.UniqueProcessId.v(),
                                                                                  self.proc.InheritedFromUniqueProcessId.v(),
                                                                                  self.proc.Pcb.DirectoryTableBase.v())
    def ps(self, procs = None):
        # Tabular process listing; defaults to every process in the image.
        print "{0:16} {1:6} {2:6} {3:8}".format("Name", "PID", "PPID", "Offset")
        for eproc in procs or self.getpidlist():
            print "{0:16} {1:<6} {2:<6} {3:#08x}".format(eproc.ImageFileName,
                                                         eproc.UniqueProcessId.v(),
                                                         eproc.InheritedFromUniqueProcessId.v(),
                                                         eproc.obj_offset)
    def modules(self, modules = None):
        # Tabular module listing; column width depends on pointer size.
        if self.addrspace.profile.metadata.get('memory_model', '32bit') == '32bit':
            print "{0:10} {1:10} {2}".format("Offset", "Base", "Name")
        else:
            print "{0:18} {1:18} {2}".format("Offset", "Base", "Name")
        for module in modules or self.getmodules():
            print "{0:#08x} {1:#08x} {2}".format(module.obj_offset,
                                                 module.DllBase,
                                                 module.FullDllName or module.BaseDllName or '')
    def set_context(self, offset = None, pid = None, name = None):
        """Select the current process by offset, PID, or image-name substring.

        Ambiguous PID/name matches are listed instead of selected so the
        user can re-run with an explicit offset.
        """
        if pid is not None:
            offsets = []
            for p in self.getpidlist():
                if p.UniqueProcessId.v() == pid:
                    offsets.append(p)
            if not offsets:
                print "Unable to find process matching pid {0}".format(pid)
                return
            elif len(offsets) > 1:
                print "Multiple processes match {0}, please specify by offset".format(pid)
                print "Matching processes:"
                self.ps(offsets)
                return
            else:
                offset = offsets[0].v()
        elif name is not None:
            offsets = []
            for p in self.getpidlist():
                if p.ImageFileName.find(name) >= 0:
                    offsets.append(p)
            if not offsets:
                print "Unable to find process matching name {0}".format(name)
                return
            elif len(offsets) > 1:
                print "Multiple processes match name {0}, please specify by PID or offset".format(name)
                print "Matching processes:"
                self.ps(offsets)
                return
            else:
                offset = offsets[0].v()
        elif offset is None:
            print "Must provide one of: offset, name, or pid as a argument."
            return
        self.proc = obj.Object("_EPROCESS", offset = offset, vm = self.addrspace)
        self.context_display()
    def render_text(self, _outfd, _data):
        """Resolve the initial context and drop into an interactive shell
        (IPython if available, otherwise stdlib code.interact)."""
        self.addrspace = utils.load_as(self._config)
        if not self._config.OFFSET is None:
            self.set_context(offset = self._config.OFFSET)
            self.context_display()
        elif self._config.PID is not None:
            # FIXME: volshell is really not intended to switch into multiple
            # process contexts at once, so it doesn't make sense to use a csv
            # pid list. However, the linux and mac volshell call the respective
            # linux_pslist and mac_pslist which require a csv pidlist. After
            # the 2.3 release we should close this along with issue 375.
            pidlist = [int(p) for p in self._config.PID.split(',')]
            for p in pidlist:
                self.set_context(pid = p)
                break
        elif self._config.IMNAME is not None:
            self.set_context(name = self._config.IMNAME)
        else:
            # Just use the first process, whatever it is
            for p in self.getpidlist():
                self.set_context(offset = p.v())
                break
        # --- helper commands exposed inside the shell ---------------------
        # Each nested function below closes over `self` and becomes a
        # top-level name in the interactive namespace.
        # Functions inside the shell
        def cc(offset = None, pid = None, name = None):
            """Change current shell context.

            This function changes the current shell context to to the process
            specified. The process specification can be given as a virtual address
            (option: offset), PID (option: pid), or process name (option: name).

            If multiple processes match the given PID or name, you will be shown a
            list of matching processes, and will have to specify by offset.
            """
            self.set_context(offset = offset, pid = pid, name = name)
        def db(address, length = 0x80, space = None):
            """Print bytes as canonical hexdump.

            This function prints bytes at the given virtual address as a canonical
            hexdump. The address will be translated in the current process context
            (see help on cc for information on how to change contexts).

            The length parameter (default: 0x80) specifies how many bytes to print,
            the width parameter (default: 16) allows you to change how many bytes per
            line should be displayed, and the space parameter allows you to
            optionally specify the address space to read the data from.
            """
            if not space:
                space = self.proc.get_process_address_space()
            #if length % 4 != 0:
            #    length = (length+4) - (length%4)
            data = space.read(address, length)
            if not data:
                print "Memory unreadable at {0:08x}".format(address)
                return
            for offset, hexchars, chars in utils.Hexdump(data):
                print "{0:#010x}  {1:<48}  {2}".format(address + offset, hexchars, ''.join(chars))
        def dd(address, length = 0x80, space = None):
            """Print dwords at address.

            This function prints the data at the given address, interpreted as
            a series of dwords (unsigned four-byte integers) in hexadecimal.
            The address will be translated in the current process context
            (see help on cc for information on how to change contexts).

            The optional length parameter (default: 0x80) controls how many bytes
            to display, and space allows you to optionally specify the address space
            to read the data from.
            """
            if not space:
                space = self.proc.get_process_address_space()
            # round up to multiple of 4
            if length % 4 != 0:
                length = (length + 4) - (length % 4)
            data = space.read(address, length)
            if not data:
                print "Memory unreadable at {0:08x}".format(address)
                return
            dwords = []
            for i in range(0, length, 4):
                (dw,) = struct.unpack("<L", data[i:i + 4])
                dwords.append(dw)
            # Four dwords per output line.
            if len(dwords) % 4 == 0: lines = len(dwords) / 4
            else: lines = len(dwords) / 4 + 1
            for i in range(lines):
                ad = address + i * 0x10
                lwords = dwords[i * 4:i * 4 + 4]
                print ("{0:08x}  ".format(ad)) + " ".join("{0:08x}".format(l) for l in lwords)
        def dq(address, length = 0x80, space = None):
            """Print qwords at address.

            This function prints the data at the given address, interpreted as
            a series of qwords (unsigned eight-byte integers) in hexadecimal.
            The address will be translated in the current process context
            (see help on cc for information on how to change contexts).

            The optional length parameter (default: 0x80) controls how many bytes
            to display, and space allows you to optionally specify the address space
            to read the data from.
            """
            if not space:
                space = self.proc.get_process_address_space()
            # round up
            if length % 8 != 0:
                length = (length + 8) - (length % 8)
            qwords = obj.Object("Array", targetType = "unsigned long long",
                                offset = address, count = length / 8, vm = space)
            if not qwords:
                print "Memory unreadable at {0:08x}".format(address)
                return
            for qword in qwords:
                print "{0:#x} {1:#x}".format(qword.obj_offset, qword.v())
        def ps():
            """Print a process listing.

            Prints a process listing with PID, PPID, image name, and offset.
            """
            self.ps()
        def modules():
            """Print a module listing.

            Prints a module listing with base, offset, name etc
            """
            self.modules()
        def sc():
            """Show the current context.

            Show the current process information.
            """
            self.context_display()
        def list_entry(head, objname, offset = -1, fieldname = None, forward = True):
            """Traverse a _LIST_ENTRY.

            Traverses a _LIST_ENTRY starting at virtual address head made up of
            objects of type objname. The value of offset should be set to the
            offset of the _LIST_ENTRY within the desired object."""
            vm = self.proc.get_process_address_space()
            seen = set()
            # NOTE(review): when fieldname is given it overrides any offset
            # that was passed explicitly.
            if fieldname:
                offset = vm.profile.get_obj_offset(objname, fieldname)
                #if typ != "_LIST_ENTRY":
                #    print ("WARN: given field is not a LIST_ENTRY, attempting to "
                #           "continue anyway.")
            lst = obj.Object("_LIST_ENTRY", head, vm)
            seen.add(lst)
            if not lst.is_valid():
                return
            while True:
                if forward:
                    lst = lst.Flink
                else:
                    lst = lst.Blink
                if not lst.is_valid():
                    return
                # Stop once the list wraps back to an entry we already saw.
                if lst in seen:
                    break
                else:
                    seen.add(lst)
                nobj = obj.Object(objname, lst.obj_offset - offset, vm)
                yield nobj
        def dt(objct, address = None, space = None):
            """Describe an object or show type info.

            Show the names and values of a complex object (struct). If the name of a
            structure is passed, show the struct's members and their types.

            You can also pass a type name and an address in order to on-the-fly
            interpret a given address as an instance of a particular structure.

            Examples:
                # Dump the current process object
                dt(self.proc)
                # Show the _EPROCESS structure
                dt('_EPROCESS')
                # Overlay an _EPROCESS structure at 0x81234567
                dt('_EPROCESS', 0x81234567)
            """
            profile = (space or self.proc.obj_vm).profile
            if address is not None:
                objct = obj.Object(objct, address, space or self.proc.get_process_address_space())
            if isinstance(objct, str):
                # Type name only: show member offsets and declared types.
                size = profile.get_obj_size(objct)
                membs = [ (profile.get_obj_offset(objct, m), m, profile.vtypes[objct][1][m][1]) for m in profile.vtypes[objct][1] ]
                print repr(objct), "({0} bytes)".format(size)
                for o, m, t in sorted(membs):
                    print "{0:6}: {1:30} {2}".format(hex(o), m, t)
            elif isinstance(objct, obj.BaseObject):
                # Instance: show member offsets and current values.
                membs = [ (o, m) for m, (o, _c) in objct.members.items() ]
                print repr(objct)
                offsets = []
                for o, m in sorted(membs):
                    val = getattr(objct, m)
                    if isinstance(val, list):
                        val = [ str(v) for v in val ]
                    # Handle a potentially callable offset
                    if callable(o):
                        o = o(objct) - objct.obj_offset
                    offsets.append((o, m, val))
                # Deal with potentially out of order offsets
                offsets.sort(key = lambda x: x[0])
                for o, m, val in offsets:
                    print "{0:6}: {1:30} {2}".format(hex(o), m, val)
            elif isinstance(objct, obj.NoneObject):
                print "ERROR: could not instantiate object"
                print
                print "Reason: ", objct.reason
            else:
                print "ERROR: first argument not an object or known type"
                print
                print "Usage:"
                print
                hh(dt)
        def dis(address, length = 128, space = None, mode = None):
            """Disassemble code at a given address.

            Disassembles code starting at address for a number of bytes
            given by the length parameter (default: 128).

            Note: This feature requires distorm, available at
                http://www.ragestorm.net/distorm/

            The mode is '16bit', '32bit' or '64bit'. If not supplied, the disasm
            mode is taken from the profile.
            """
            if not sys.modules.has_key("distorm3"):
                print "ERROR: Disassembly unavailable, distorm not found"
                return
            if not space:
                space = self.proc.get_process_address_space()

            if mode == None:
                mode = space.profile.metadata.get('memory_model', '32bit')
            # we'll actually allow the possiblility that someone passed a correct mode
            if mode not in [distorm3.Decode16Bits, distorm3.Decode32Bits, distorm3.Decode64Bits]:
                if mode == '16bit':
                    mode = distorm3.Decode16Bits
                elif mode == '32bit':
                    mode = distorm3.Decode32Bits
                else:
                    mode = distorm3.Decode64Bits
            distorm_mode = mode

            data = space.read(address, length)
            iterable = distorm3.DecodeGenerator(address, data, distorm_mode)
            for (offset, _size, instruction, hexdump) in iterable:
                print "{0:<#8x} {1:<32} {2}".format(offset, hexdump, instruction)
        # Name -> callable map; drives hh()'s command listing.
        shell_funcs = {'cc': cc, 'dd': dd, 'db': db, 'ps': ps, 'dt': dt, 'list_entry': list_entry, 'dis': dis, 'dq': dq, 'modules': modules, 'sc': sc,}
        def hh(cmd = None):
            """Get help on a command."""
            shell_funcs['hh'] = hh
            import pydoc
            from inspect import getargspec, formatargspec
            if not cmd:
                # No argument: list every command with its one-line synopsis.
                print "\nUse self.addrspace for Kernel/Virtual AS"
                print "Use self.addrspace.base for Physical AS"
                print "Use self.proc to get the current _EPROCESS object"
                print "  and self.proc.get_process_address_space() for the current process AS"
                print "  and self.proc.get_load_modules() for the current process DLLs\n"
                for f in sorted(shell_funcs):
                    doc = pydoc.getdoc(shell_funcs[f])
                    synop, _full = pydoc.splitdoc(doc)
                    print "{0:40} : {1}".format(f + formatargspec(*getargspec(shell_funcs[f])), synop)
                print "\nFor help on a specific command, type 'hh(<command>)'"
            elif type(cmd) == str:
                try:
                    doc = pydoc.getdoc(shell_funcs[cmd])
                except KeyError:
                    print "No such command: {0}".format(cmd)
                    return
                print doc
            else:
                doc = pydoc.getdoc(cmd)
                print doc
        # Break into shell
        banner = "Welcome to volshell! Current memory image is:\n{0}\n".format(self._config.LOCATION)
        banner += "To get help, type 'hh()'"
        # Prefer an embedded IPython shell; fall back to code.interact.
        try:
            from IPython.Shell import IPShellEmbed #pylint: disable-msg=W0611,F0401
            shell = IPShellEmbed([], banner = banner)
            shell()
        except ImportError:
            import code, inspect
            frame = inspect.currentframe()
            # Try to enable tab completion
            try:
                import rlcompleter, readline #pylint: disable-msg=W0612
                readline.parse_and_bind("tab: complete")
            except ImportError:
                pass
            # evaluate commands in current namespace
            namespace = frame.f_globals.copy()
            namespace.update(frame.f_locals)
            code.interact(banner = banner, local = namespace)
| gpl-2.0 |
gergap/binutils-gdb | gdb/python/lib/gdb/prompt.py | 124 | 4210 | # Extended prompt utilities.
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
    "The current working directory."
    # os.getcwdu() only exists on Python 2 (it returns unicode there);
    # fall back to os.getcwd() so the '\w' substitution keeps working
    # when GDB embeds Python 3, where getcwdu() was removed.
    getcwd = getattr(os, 'getcwdu', os.getcwd)
    return getcwd()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
    "The selected frame; an argument names a frame parameter."
    # NOTE: the docstring above is user-visible help (printed by prompt_help).
    # With no argument, substitutes the frame's 'name' attribute.
    return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
    "The selected thread; an argument names a thread parameter."
    # With no argument, substitutes the thread's 'num' attribute.
    return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
    "The version of GDB."
    # The argument is ignored; all substitutions share one signature.
    return gdb.VERSION
def _prompt_esc(attr):
    "The ESC character."
    # Lets users embed terminal escape sequences (e.g. colors) in the prompt.
    return '\033'
def _prompt_bs(attr):
    "A backslash."
    # "\\" in the prompt string produces a literal backslash.
    return '\\'
def _prompt_n(attr):
    "A newline."
    # "\n" in the prompt string inserts a line break.
    return '\n'
def _prompt_r(attr):
    "A carriage return."
    # "\r" in the prompt string inserts a carriage return.
    return '\r'
def _prompt_param(attr):
    "A parameter's value; the argument names the parameter."
    # e.g. "\p{history size}" expands to the value of that GDB parameter.
    return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
    "Begins a sequence of non-printing characters."
    # \001 (readline's RL_PROMPT_START_IGNORE): what follows does not count
    # toward the on-screen prompt width.
    return '\001'
def _prompt_noprint_end(attr):
    "Ends a sequence of non-printing characters."
    # \002 (readline's RL_PROMPT_END_IGNORE): resume counting prompt width.
    return '\002'
# Maps the escape character (the char following '\' in the prompt string)
# to the function producing its replacement text; see substitute_prompt().
prompt_substitutions = {
    'e': _prompt_esc,
    '\\': _prompt_bs,
    'n': _prompt_n,
    'r': _prompt_r,
    'v': _prompt_version,
    'w': _prompt_pwd,
    'f': _prompt_frame,
    't': _prompt_thread,
    'p': _prompt_param,
    '[': _prompt_noprint_begin,
    ']': _prompt_noprint_end
}
def prompt_help():
    """Generate help dynamically from the __doc__ strings of attribute
    functions."""
    # One line per substitution, in sorted escape-character order.
    entries = [' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
               for key in sorted(prompt_substitutions)]
    trailer = """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
    return ''.join(entries) + trailer
def substitute_prompt(prompt):
    "Perform substitutions on PROMPT."
    # Hand-rolled scanner: walks PROMPT character by character, expanding
    # "\X" and "\X{arg}" escapes via prompt_substitutions.
    result = ''
    plen = len(prompt)
    i = 0
    while i < plen:
        if prompt[i] == '\\':
            i = i + 1
            if i >= plen:
                # Trailing lone backslash: silently dropped.
                break
            cmdch = prompt[i]
            if cmdch in prompt_substitutions:
                cmd = prompt_substitutions[cmdch]
                if i + 1 < plen and prompt[i + 1] == '{':
                    # Scan forward for the matching '}' of "\X{arg}".
                    j = i + 1
                    while j < plen and prompt[j] != '}':
                        j = j + 1
                    # Just ignore formatting errors.
                    if j >= plen or prompt[j] != '}':
                        arg = None
                    else:
                        arg = prompt[i + 2 : j]
                    i = j
                else:
                    arg = None
                result += str(cmd(arg))
            else:
                # Unrecognized escapes are turned into the escaped
                # character itself.
                result += prompt[i]
        else:
            result += prompt[i]
        i = i + 1
    return result
| gpl-2.0 |
trezorg/django | tests/regressiontests/admin_ordering/tests.py | 50 | 2558 | from django.test import TestCase
from django.contrib.admin.options import ModelAdmin
from models import Band, Song, SongInlineDefaultOrdering, SongInlineNewOrdering
class TestAdminOrdering(TestCase):
    """
    Verify that ModelAdmin.queryset applies the ordering declared on the
    ModelAdmin rather than the ordering from the model's inner Meta class.
    """
    def setUp(self):
        # Names deliberately sort differently from ranks.
        fixtures = (('Aerosmith', 3), ('Radiohead', 1), ('Van Halen', 2))
        for band_name, band_rank in fixtures:
            Band(name=band_name, bio='', rank=band_rank).save()

    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        model_admin = ModelAdmin(Band, None)
        observed = [band.name for band in model_admin.queryset(None)]
        self.assertEqual([u'Aerosmith', u'Radiohead', u'Van Halen'], observed)

    def test_specified_ordering(self):
        """
        A custom ModelAdmin that changes the ordering must actually change it.
        """
        class BandAdmin(ModelAdmin):
            ordering = ('rank',)  # default ordering is ('name',)

        model_admin = BandAdmin(Band, None)
        observed = [band.name for band in model_admin.queryset(None)]
        self.assertEqual([u'Radiohead', u'Van Halen', u'Aerosmith'], observed)
class TestInlineModelAdminOrdering(TestCase):
    """
    Let's make sure that InlineModelAdmin.queryset uses the ordering we define
    in InlineModelAdmin.
    """
    def setUp(self):
        # One band with three songs whose names and insertion order differ,
        # so each ordering strategy yields a distinct sequence.
        b = Band(name='Aerosmith', bio='', rank=3)
        b.save()
        self.b = b
        s1 = Song(band=b, name='Pink', duration=235)
        s1.save()
        s2 = Song(band=b, name='Dude (Looks Like a Lady)', duration=264)
        s2.save()
        s3 = Song(band=b, name='Jaded', duration=214)
        s3.save()
    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        inline = SongInlineDefaultOrdering(self.b, None)
        names = [s.name for s in inline.queryset(None)]
        self.assertEqual([u'Dude (Looks Like a Lady)', u'Jaded', u'Pink'], names)
    def test_specified_ordering(self):
        """
        Let's check with ordering set to something different than the default.
        """
        inline = SongInlineNewOrdering(self.b, None)
        names = [s.name for s in inline.queryset(None)]
        self.assertEqual([u'Jaded', u'Pink', u'Dude (Looks Like a Lady)'], names) | bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_qt4agg.py | 10 | 2177 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import FigureCanvasQTAggBase as _FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    if DEBUG:
        print('backend_qt4agg.new_figure_manager')
    # Allow callers to supply a Figure subclass; default to plain Figure.
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # Wrap the figure in a Qt4/Agg canvas; the manager owns the window,
    # toolbar and figure number.
    canvas = FigureCanvasQTAgg(figure)
    return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(_FigureCanvasQTAggBase):
    # Thin shim over the qt5agg base class for the qt4 backend.
    def __init__(self, figure):
        # NOTE(review): deliberately does not chain to the qt5agg base
        # __init__; it only seeds the flag the shared draw machinery reads.
        # The concrete canvas class below completes initialisation itself.
        self._agg_draw_pending = False
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
                        FigureCanvasQT, FigureCanvasAgg):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...
    Public attribute
      figure - A Figure instance
    """
    def __init__(self, figure):
        if DEBUG:
            print('FigureCanvasQtAgg: ', figure)
        # Each base is initialised explicitly; these (older) canvas classes
        # do not use a cooperative super() chain.
        FigureCanvasQT.__init__(self, figure)
        FigureCanvasQTAggBase.__init__(self, figure)
        FigureCanvasAgg.__init__(self, figure)
        self._drawRect = None  # rubber-band rectangle shown during zoom
        self.blitbox = []      # bounding boxes queued for blit-based redraw
        # Skip Qt's own background fill; Agg repaints the whole canvas.
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
# Canonical names looked up by matplotlib's backend-loading machinery.
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| gpl-3.0 |
welikecloud/bigtop | bigtop-packages/src/charm/spark/layer-spark/tests/02-smoke-test.py | 2 | 1647 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
    """
    Smoke test for Apache Bigtop Spark.
    """
    @classmethod
    def setUpClass(cls):
        # Deploy a single 'spark' charm on xenial; both the deployment and
        # the wait for a status message containing 'ready' are capped at
        # 30 minutes (1800 s).
        cls.d = amulet.Deployment(series='xenial')
        cls.d.add('spark')
        cls.d.setup(timeout=1800)
        cls.d.sentry.wait_for_messages({'spark': re.compile('ready')}, timeout=1800)
        cls.spark = cls.d.sentry['spark'][0]
    def test_spark(self):
        """
        Validate Spark by running the smoke-test action.
        """
        # Fire the charm's 'smoke-test' action and fetch its full result.
        uuid = self.spark.run_action('smoke-test')
        result = self.d.action_fetch(uuid, full_output=True)
        # action status=completed on success
        if (result['status'] != "completed"):
            self.fail('Spark smoke-test failed: %s' % result)
if __name__ == '__main__':
    # Allow running this smoke test directly: python3 02-smoke-test.py
    unittest.main()
| apache-2.0 |
crazyyoung01/vv | vn.how/tick2trade/vn.trader_t2t/riskManager/rmEngine.py | 12 | 7884 | # encoding: UTF-8
'''
本文件中实现了风控引擎,用于提供一系列常用的风控功能:
1. 委托流控(单位时间内最大允许发出的委托数量)
2. 总成交限制(每日总成交数量限制)
3. 单笔委托的委托数量控制
'''
import json
import os
import platform
from eventEngine import *
from vtConstant import *
from vtGateway import VtLogData
########################################################################
class RmEngine(object):
    """Risk-management engine.

    Performs pre-trade risk checks for the main engine:
      1. order-flow throttling (max orders per time window)
      2. daily traded-volume limit
      3. per-order size limit
      4. cap on the number of simultaneously working orders
    """
    # Settings are persisted next to this module in RM_setting.json.
    settingFileName = 'RM_setting.json'
    path = os.path.abspath(os.path.dirname(__file__))
    settingFileName = os.path.join(path, settingFileName)
    # Gateway name attached to emitted log events (user-facing string).
    name = u'风控模块'
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        # Whether risk checks are enabled.
        self.active = False
        # Order-flow throttling state.
        self.orderFlowCount = EMPTY_INT    # orders counted in the current window
        self.orderFlowLimit = EMPTY_INT    # max orders allowed per window
        self.orderFlowClear = EMPTY_INT    # window length in seconds
        self.orderFlowTimer = EMPTY_INT    # seconds elapsed in the current window
        # Per-order size limit.
        self.orderSizeLimit = EMPTY_INT    # max volume of a single order
        # Trade statistics.
        self.tradeCount = EMPTY_INT        # contracts traded today
        self.tradeLimit = EMPTY_INT        # daily traded-contract limit
        # Working-order limit.
        self.workingOrderLimit = EMPTY_INT # max simultaneously working orders
        self.loadSetting()
        self.registerEvent()
    #----------------------------------------------------------------------
    def loadSetting(self):
        """Load risk parameters from the JSON settings file."""
        with open(self.settingFileName) as f:
            d = json.load(f)
            # Apply risk parameters.
            self.active = d['active']
            self.orderFlowLimit = d['orderFlowLimit']
            self.orderFlowClear = d['orderFlowClear']
            self.orderSizeLimit = d['orderSizeLimit']
            self.tradeLimit = d['tradeLimit']
            self.workingOrderLimit = d['workingOrderLimit']
    #----------------------------------------------------------------------
    def saveSetting(self):
        """Persist the current risk parameters to the JSON settings file."""
        with open(self.settingFileName, 'w') as f:
            # Collect risk parameters.
            d = {}
            d['active'] = self.active
            d['orderFlowLimit'] = self.orderFlowLimit
            d['orderFlowClear'] = self.orderFlowClear
            d['orderSizeLimit'] = self.orderSizeLimit
            d['tradeLimit'] = self.tradeLimit
            d['workingOrderLimit'] = self.workingOrderLimit
            # Write as JSON.
            jsonD = json.dumps(d, indent=4)
            f.write(jsonD)
    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register event-engine listeners."""
        self.eventEngine.register(EVENT_TRADE, self.updateTrade)
        self.eventEngine.register(EVENT_TIMER, self.updateTimer)
    #----------------------------------------------------------------------
    def updateTrade(self, event):
        """Accumulate traded volume from a trade event."""
        trade = event.dict_['data']
        self.tradeCount += trade.volume
    #----------------------------------------------------------------------
    def updateTimer(self, event):
        """Advance the flow-control timer (called once per timer event)."""
        self.orderFlowTimer += 1
        # Reset the flow counter once the window has elapsed.
        if self.orderFlowTimer >= self.orderFlowClear:
            self.orderFlowCount = 0
            self.orderFlowTimer = 0
    #----------------------------------------------------------------------
    def writeRiskLog(self, content):
        """Emit a log event (with an audible alert on Windows)."""
        # BUGFIX: platform.uname() returns a tuple, so comparing it to the
        # string 'Windows' was always False and the alert never played;
        # platform.system() is the correct check.
        if platform.system() == 'Windows':
            import winsound
            winsound.PlaySound("SystemHand", winsound.SND_ASYNC)
        # Push the log event onto the event engine.
        log = VtLogData()
        log.logContent = content
        log.gatewayName = self.name
        event = Event(type_=EVENT_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
    #----------------------------------------------------------------------
    def checkRisk(self, orderReq):
        """Run all risk checks on an order request; True means allowed."""
        # When risk management is disabled, pass everything through.
        if not self.active:
            return True
        # Per-order size limit.
        if orderReq.volume > self.orderSizeLimit:
            self.writeRiskLog(u'单笔委托数量%s,超过限制%s'
                              %(orderReq.volume, self.orderSizeLimit))
            return False
        # Daily traded-volume limit.
        if self.tradeCount >= self.tradeLimit:
            self.writeRiskLog(u'今日总成交合约数量%s,超过限制%s'
                              %(self.tradeCount, self.tradeLimit))
            return False
        # Order-flow throttling.
        if self.orderFlowCount >= self.orderFlowLimit:
            self.writeRiskLog(u'委托流数量%s,超过限制每%s秒%s'
                              %(self.orderFlowCount, self.orderFlowClear, self.orderFlowLimit))
            return False
        # Working-order limit.
        workingOrderCount = len(self.mainEngine.getAllWorkingOrders())
        if workingOrderCount >= self.workingOrderLimit:
            self.writeRiskLog(u'当前活动委托数量%s,超过限制%s'
                              %(workingOrderCount, self.workingOrderLimit))
            return False
        # Order passed all checks; count it against the flow window.
        self.orderFlowCount += 1
        return True
    #----------------------------------------------------------------------
    def clearOrderFlowCount(self):
        """Reset the order-flow counter."""
        self.orderFlowCount = 0
        self.writeRiskLog(u'清空流控计数')
    #----------------------------------------------------------------------
    def clearTradeCount(self):
        """Reset the daily traded-volume counter."""
        self.tradeCount = 0
        self.writeRiskLog(u'清空总成交计数')
    #----------------------------------------------------------------------
    def setOrderFlowLimit(self, n):
        """Set the max orders allowed per flow window."""
        self.orderFlowLimit = n
    #----------------------------------------------------------------------
    def setOrderFlowClear(self, n):
        """Set the flow-window length in seconds."""
        self.orderFlowClear = n
    #----------------------------------------------------------------------
    def setOrderSizeLimit(self, n):
        """Set the max volume of a single order."""
        self.orderSizeLimit = n
    #----------------------------------------------------------------------
    def setTradeLimit(self, n):
        """Set the daily traded-contract limit."""
        self.tradeLimit = n
    #----------------------------------------------------------------------
    def setWorkingOrderLimit(self, n):
        """Set the max number of simultaneously working orders."""
        self.workingOrderLimit = n
    #----------------------------------------------------------------------
    def switchEngineStatus(self):
        """Toggle risk management on/off."""
        self.active = not self.active
        if self.active:
            self.writeRiskLog(u'风险管理功能启动')
        else:
            self.writeRiskLog(u'风险管理功能停止')
| mit |
fpy171/django | setup.py | 195 | 3257 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
    lib_paths = [get_python_lib()]
    if lib_paths[0].startswith("/usr/lib/"):
        # We have to try also with an explicit prefix of /usr/local in order to
        # catch Debian's custom user site-packages directory.
        lib_paths.append(get_python_lib(prefix="/usr/local"))
    for lib_path in lib_paths:
        existing_path = os.path.abspath(os.path.join(lib_path, "django"))
        if os.path.exists(existing_path):
            # We note the need for the warning here, but present it after the
            # command is run, so it's more likely to be seen.
            overlay_warning = True
            break
# Template/bin packages are shipped as data, not importable packages.
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
                         'django.conf.app_template',
                         'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
    name='Django',
    version=version,
    url='http://www.djangoproject.com/',
    author='Django Software Foundation',
    author_email='foundation@djangoproject.com',
    description=('A high-level Python Web framework that encourages '
                 'rapid development and clean, pragmatic design.'),
    license='BSD',
    packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
    include_package_data=True,
    scripts=['django/bin/django-admin.py'],
    entry_points={'console_scripts': [
        'django-admin = django.core.management:execute_from_command_line',
    ]},
    extras_require={
        "bcrypt": ["bcrypt"],
    },
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
# Present the overlay warning only after setup() has run, so it is the last
# thing the user sees.
if overlay_warning:
    sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
| bsd-3-clause |
imsplitbit/nova | nova/api/openstack/compute/plugins/v3/used_limits.py | 12 | 3299 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import quota
QUOTAS = quota.QUOTAS
# XML namespace and alias under which this v3 API extension is registered.
XMLNS = "http://docs.openstack.org/compute/ext/used_limits/api/v3"
ALIAS = "os-used-limits"
# Policy authorizer for rule 'compute:v3:os-used-limits'.
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class UsedLimitsTemplate(xmlutil.TemplateBuilder):
    # XML serializer: attaches the usedLimits namespace attribute to the
    # <limits> element produced by the core limits view.
    def construct(self):
        root = xmlutil.TemplateElement('limits', selector='limits')
        root.set('{%s}usedLimits' % XMLNS, '%s:usedLimits' % ALIAS)
        return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS})
class UsedLimitsController(wsgi.Controller):
    @staticmethod
    def _reserved(req):
        # Treat '?reserved=<int>' as a flag; a missing or non-integer value
        # disables inclusion of reserved quota in the usage figures.
        try:
            return int(req.GET['reserved'])
        except (ValueError, KeyError):
            return False
    @wsgi.extends
    def index(self, req, resp_obj):
        # Extends the core GET /limits response with per-project usage.
        context = req.environ['nova.context']
        authorize(context)
        resp_obj.attach(xml=UsedLimitsTemplate())
        project_id = self._project_id(context, req)
        quotas = QUOTAS.get_project_quotas(context, project_id, usages=True)
        # Response key -> quota resource name.
        quota_map = {
            'totalRAMUsed': 'ram',
            'totalCoresUsed': 'cores',
            'totalInstancesUsed': 'instances',
            'totalFloatingIpsUsed': 'floating_ips',
            'totalSecurityGroupsUsed': 'security_groups',
        }
        used_limits = {}
        for display_name, quota in quota_map.iteritems():  # Python 2 dict API
            if quota in quotas:
                reserved = (quotas[quota]['reserved']
                            if self._reserved(req) else 0)
                used_limits[display_name] = quotas[quota]['in_use'] + reserved
        resp_obj.obj['limits']['absolute'].update(used_limits)
    def _project_id(self, context, req):
        # Admins may request another tenant's usage via '?tenant_id=...';
        # the 'tenant' policy rule guards that path.
        if 'tenant_id' in req.GET:
            tenant_id = req.GET.get('tenant_id')
            target = {
                'project_id': tenant_id,
                'user_id': context.user_id
            }
            authorize(context, target=target, action='tenant')
            return tenant_id
        return context.project_id
class UsedLimits(extensions.V3APIExtensionBase):
    """Provide data on limited resources that are being used."""
    name = "UsedLimits"
    alias = ALIAS
    namespace = XMLNS
    version = 1
    def get_controller_extensions(self):
        # Hook the controller onto the existing 'limits' resource.
        controller = UsedLimitsController()
        limits_ext = extensions.ControllerExtension(self, 'limits',
                                                    controller=controller)
        return [limits_ext]
    def get_resources(self):
        # No standalone resources; this extension only decorates /limits.
        return []
| apache-2.0 |
outbig/DAFGA | v1.0/dafga_refDB.py | 1 | 5933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
# Author: Yongkyu Kim, PhD
Max Planck Institute for terrestrial microbiology
#
# Date: 2014-04-02
# Version: 1.0
#
# Try 'dafga_refDB.py -h' for more information and See manual
#
# Purpose:
# Bugs: Please report to https://github.com/outbig/DAFGA/issues?state=open
"""
from Bio import SeqIO, Entrez
from argparse import ArgumentParser
import os
def make_directory(dirname):
    """Create *dirname* (including missing parents) and return its absolute path.

    Any OSError from os.makedirs (e.g. the directory already exists, or
    permission is denied) propagates to the caller unchanged.  The original
    try/except that merely re-raised OSError was a no-op and has been removed.
    """
    os.makedirs(dirname)
    return os.path.abspath(dirname)
def delete_contig_line_from_gbk(gp):  # delete unreconizable lines by bioptyhon
    """Copy *gp* to <o_dir>/processed.gp, dropping record lines that
    Biopython's GenBank parser cannot handle, and return the new path."""
    skip_prefixes = ("CONTIG", " SecStr", " Het", " Bond")
    processed = os.path.join(o_dir, "processed.gp")
    with open(processed, "w") as cleaned:
        for line in open(gp, "r"):
            if not line.startswith(skip_prefixes):
                cleaned.write(line)
    return processed
def parsing_gp(gp, LENGTH):
    # Parse the GenPept file: write every non-"culture" record to
    # <prefix>_ref_seqs.fasta, map record name -> NCBI taxon id, and collect
    # organism+strain info for records at least LENGTH residues long.
    print '\n ...Parsing reference sequnece information in gp format...\n'
    source_strain, ref_taxa_xref = {}, {}
    with open(prefix+"_ref_seqs.fasta","w") as refseq:
        for record in SeqIO.parse(gp,"genbank"):
            source = record.features[0].qualifiers
            if "culture" not in record.description:
                source_strain_info = ""
                # The 'db_xref' qualifier carries "taxon:<id>" entries.
                taxa_xref = [x for x in source["db_xref"] if "taxon:" in x]
                xref = taxa_xref[0].split("taxon:")[-1]
                ref_taxa_xref[record.name] = xref
                refseq.write(">"+record.name+"\t"+record.description.strip(".")+"\t"+xref+"\n")
                refseq.write(str(record.seq)+"\n")
                if len(record.seq) >= int(LENGTH):
                    if source.get("strain"):
                        # Avoid duplicating the strain token when the organism
                        # name already contains it.
                        if source["strain"][0] in source["organism"][0]:
                            source_strain_info = source["organism"][0]
                        else:
                            source_strain_info = source["organism"][0]+" "+source["strain"][0].split(" ")[-1]
                        source_strain[record.name] = [source_strain_info, record.seq]
    print 'Done\n'
    # NOTE(review): 'refseq' is the file handle, already closed by the with
    # block; the caller in __main__ ignores this third return value.
    return ref_taxa_xref, source_strain, refseq
def efetch_from_taxonomy(xref): # retreive full taxonomic lineage from taxonomy database
    # xref maps refseq ID -> NCBI taxon id; writes one "<ID>\t<lineage>"
    # row per entry to <prefix>_ID_to_taxonomy.txt.
    with open(prefix+"_ID_to_taxonomy.txt","w") as outfile:
        handle_taxon = Entrez.efetch(db="taxonomy", id=xref.values(), retmode="xml")
        records = Entrez.read(handle_taxon)
        handle_taxon.close()
        print "No. of reference sequences: {0}\n".format(len(records))
        print " ...Retrieve taxonomic lineage information... "
        outfile.write("ReferenceID\tNCBI_taxanomy\n")
        # Pairing records[i] with xref.keys()[i] relies on efetch returning
        # results in submission order and on keys()/values() being consistent
        # for an unmodified dict (guaranteed by Python).
        for i, seq in enumerate(xref.keys()):
            outfile.write(seq+'\t'+records[i]['Lineage']+'\n')
        print "\nDone\n"
def searching_nt(source_strain):
    # For each reference protein's source organism, search the nt database
    # for a 16S rRNA record and fetch the first suitable hit into
    # <prefix>_strain_16S_rRNAs.fasta.
    print " ...Searching nt database and fetching 16S rRNA sequences...\n "
    print "16S ID\t\tFunGene ID\tDescription\t\t16S length"
    source_list = []
    with open(prefix+"_strain_16S_rRNAs.fasta","w") as rRNAs:
        for seqID, strain in source_strain.items():
            # strain[0] is "organism [strain]"; exclude whole genomes.
            search = strain[0]+"[Organism] AND 16S NOT genome" # key word to search 16S rRNA of source organism
            handle_esearch = Entrez.esearch(db="nucleotide",term=search)
            records = Entrez.read(handle_esearch)
            handle_esearch.close()
            if len(records["IdList"]) > 0:
                # efetch_from_nt_list returns the written seqID, or None when
                # no hit qualified; None entries are filtered out later.
                source_list.append(efetch_from_nt_list(seqID, strain, records["IdList"], rRNAs))
    print "Done\n"
    return source_list
def efetch_from_nt_list(seqID, strain, nt_list, rRNA_out):
    # Fetch the candidate nucleotide records and write the first near-
    # full-length 16S rRNA (1200-1600 nt, not an intergenic spacer) to
    # rRNA_out.  Returns seqID when a sequence was written, else None.
    handle_efetch = Entrez.efetch(db="nucleotide", id=nt_list, retmode="xml", validate=False)
    records = Entrez.read(handle_efetch, validate=False)
    handle_efetch.close()
    temp = None
    for record in records:
        HIT_DEF = record["GBSeq_definition"]
        # '16' matches "16S"; 'inter' excludes internal transcribed spacers.
        if '16' in HIT_DEF and 1200 < int(record["GBSeq_length"]) < 1600 and 'inter' not in HIT_DEF:
            if temp == None:
                if record.get('GBSeq_sequence'):
                    rRNA_out.write('>'+record['GBSeq_locus']+'\t'+seqID+'\t'+HIT_DEF+'\n'+record['GBSeq_sequence']+'\n')
                    temp = seqID
                    print record['GBSeq_locus']+'\t'+seqID+'\t'+HIT_DEF+'\t'+str(len(record['GBSeq_sequence']))
    return temp
def delete_redundancy(rRNA_sources, refseq):
with open(prefix+'_strain.fasta','w') as fg_strain:
count = 0
for item in rRNA_sources:
if item in refseq.keys():
fg_strain.write('>'+item+'\n'+str(refseq[item][1])+'\n')
count+=1
print "No. of refseqs with strain level description: {0}\n".format(count)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-gp',dest='gp',required=True, help='publically available sequences retrieved from NCBI protein database in gp format')
parser.add_argument('--email',dest='email',required=True, help='to inform NCBI who you are')
parser.add_argument('-o',dest='o_dir',required=True, help='the directory where output files will be saved')
parser.add_argument('-l',dest='length', default = 50, help='minimum length of reference sequences. Default: 50')
args=parser.parse_args()
Entrez.email = args.email
global o_dir, prefix
o_dir = make_directory(args.o_dir)
prefix = os.path.join(o_dir,os.path.basename(args.gp).split(".")[0])
gp = delete_contig_line_from_gbk(args.gp)
ref_xref, source_strain, refseq = parsing_gp(gp, args.length)
efetch_from_taxonomy(ref_xref)
source_list = searching_nt(source_strain)
delete_redundancy(source_list, source_strain)
| gpl-3.0 |
smartfile/django-1.4 | tests/regressiontests/urlpatterns_reverse/urls.py | 3 | 3864 | from __future__ import absolute_import
from django.conf.urls import patterns, url, include
from .views import empty_view, empty_view_partial, empty_view_wrapped, absolute_kwargs_view
# URLconf pulled in via include(); exercises includes whose prefix is not a
# path component (see the 'includes/' entry in urlpatterns below).
other_patterns = patterns('',
    url(r'non_path_include/$', empty_view, name='non_path_include'),
    url(r'nested_path/$', 'regressiontests.urlpatterns_reverse.views.nested_view'),
)
# Regression fixture for reverse(): each pattern exercises one regex feature
# (quantifiers, groups, backrefs, escapes, case-insensitivity, includes, ...).
urlpatterns = patterns('',
    url(r'^places/(\d+)/$', empty_view, name='places'),
    url(r'^places?/$', empty_view, name="places?"),
    url(r'^places+/$', empty_view, name="places+"),
    url(r'^places*/$', empty_view, name="places*"),
    url(r'^(?:places/)?$', empty_view, name="places2?"),
    url(r'^(?:places/)+$', empty_view, name="places2+"),
    url(r'^(?:places/)*$', empty_view, name="places2*"),
    url(r'^places/(\d+|[a-z_]+)/', empty_view, name="places3"),
    url(r'^places/(?P<id>\d+)/$', empty_view, name="places4"),
    url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
    url(r'^people/(?:name/)', empty_view, name="people2"),
    url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
    url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
    url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
    url(r'^hardcoded/$', empty_view, name="hardcoded"),
    url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\d)/$', empty_view, name="people4"),
    url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
    url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
    url(r'^character_set/[\w]/$', empty_view, name="range2"),
    url(r'^price/\$(\d+)/$', empty_view, name="price"),
    url(r'^price/[$](\d+)/$', empty_view, name="price2"),
    url(r'^price/[\$](\d+)/$', empty_view, name="price3"),
    url(r'^product/(?P<product>\w+)\+\(\$(?P<price>\d+(\.\d+)?)\)/$',
        empty_view, name="product"),
    url(r'^headlines/(?P<year>\d+)\.(?P<month>\d+)\.(?P<day>\d+)/$', empty_view,
        name="headlines"),
    url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view,
        name="windows"),
    url(r'^special_chars/(.+)/$', empty_view, name="special"),
    url(r'^(?P<name>.+)/\d+/$', empty_view, name="mixed"),
    url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
    url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
    url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
    url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
    url(r'^test/1/?', empty_view, name="test"),
    url(r'^(?i)test/2/?$', empty_view, name="test2"),
    url(r'^outer/(?P<outer>\d+)/',
        include('regressiontests.urlpatterns_reverse.included_urls')),
    url('', include('regressiontests.urlpatterns_reverse.extra_urls')),
    # This is non-reversible, but we shouldn't blow up when parsing it.
    url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
    # Partials should be fine.
    url(r'^partial/', empty_view_partial, name="partial"),
    url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
    # Regression views for #9038. See tests for more details
    url(r'arg_view/$', 'kwargs_view'),
    url(r'arg_view/(?P<arg1>\d+)/$', 'kwargs_view'),
    url(r'absolute_arg_view/(?P<arg1>\d+)/$', absolute_kwargs_view),
    url(r'absolute_arg_view/$', absolute_kwargs_view),
    # Tests for #13154. Mixed syntax to test both ways of defining URLs.
    url(r'defaults_view1/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 1}, name='defaults'),
    (r'defaults_view2/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 2}, 'defaults'),
    url('^includes/', include(other_patterns)),
    # Security tests
    url('(.+)/security/$', empty_view, name='security'),
)
| bsd-3-clause |
4bic/censusreporter | census/urls.py | 2 | 6657 | from django.conf import settings
from django.conf.urls import url, patterns, include
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.views.generic.base import TemplateView, RedirectView
from .views import (HomepageView, GeographySearchView,
TableDetailView, TableSearchView, GeoSearch,
HealthcheckView, DataView, TopicView, ExampleView, Elasticsearch)
from .wazi import (GeographyDetailView, GeographyJsonView, WardSearchProxy, PlaceSearchJson,
LocateView, DataAPIView, TableAPIView, AboutView, GeographyCompareView)
admin.autodiscover()
# Shared cache lifetime applied to most page views below.
STANDARD_CACHE_TIME = 60*60 # 60-minute cache
# Allowed values of the <format> capture in the data-detail URL.
COMPARISON_FORMATS = 'map|table|distribution'
BLOCK_ROBOTS = getattr(settings, 'BLOCK_ROBOTS', False)
# Geography levels accepted in profile/compare geography IDs.
geo_levels = 'ward|municipality|province|country'
# Main URLconf: profile pages, comparison pages, the JSON/data APIs and a few
# utility endpoints.  Most views are wrapped in cache_page(STANDARD_CACHE_TIME).
urlpatterns = patterns('',
    url(
        regex = '^$',
        view = cache_page(STANDARD_CACHE_TIME)(HomepageView.as_view()),
        kwargs = {},
        name = 'homepage',
    ),
    url(
        regex = '^about$',
        view = cache_page(STANDARD_CACHE_TIME)(AboutView.as_view()),
        kwargs = {},
        name = 'about',
    ),
    # e.g. /profiles/province-GT/
    url(
        regex = '^profiles/(?P<geography_id>(%s)-[\w]+)/$' % geo_levels,
        view = cache_page(STANDARD_CACHE_TIME)(GeographyDetailView.as_view()),
        kwargs = {},
        name = 'geography_detail',
    ),
    # e.g. /profiles/province-GT.json
    url(
        regex = '^(embed_data/)?profiles/(?P<geography_id>(%s)-[\w]+)\.json$' % geo_levels,
        view = cache_page(STANDARD_CACHE_TIME)(GeographyJsonView.as_view()),
        kwargs = {},
        name = 'geography_json',
    ),
    # e.g. /compare/province-GT/vs/province-WC/
    url(
        regex = '^compare/(?P<geo_id1>(%s)-[\w]+)/vs/(?P<geo_id2>(%s)-[\w]+)/$' % (geo_levels, geo_levels),
        view = cache_page(STANDARD_CACHE_TIME)(GeographyCompareView.as_view()),
        kwargs = {},
        name = 'geography_compare',
    ),
    # Custom data api
    url(
        regex = '^api/1.0/data/show/latest$',
        view = cache_page(STANDARD_CACHE_TIME)(DataAPIView.as_view()),
        kwargs = {'action': 'show'},
        name = 'api_show_data',
    ),
    # download API
    url(
        regex = '^api/1.0/data/download/latest$',
        view = DataAPIView.as_view(),
        kwargs = {'action': 'download'},
        name = 'api_download_data',
    ),
    # table search API
    url(
        regex = '^api/1.0/table$',
        view = cache_page(STANDARD_CACHE_TIME)(TableAPIView.as_view()),
        kwargs = {},
        name = 'api_list_tables',
    ),
    # TODO enable this see: https://github.com/Code4SA/censusreporter/issues/31
    #url(
    #    regex = '^profiles/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(GeographySearchView.as_view()),
    #    kwargs = {},
    #    name = 'geography_search',
    #),
    # e.g. /table/B01001/
    #url(
    #    regex = '^tables/B23002/$',
    #    view = RedirectView.as_view(url=reverse_lazy('table_detail',kwargs={'table':'B23002A'})),
    #    kwargs = {},
    #    name = 'redirect_B23002',
    #),
    #url(
    #    regex = '^tables/C23002/$',
    #    view = RedirectView.as_view(url=reverse_lazy('table_detail',kwargs={'table':'C23002A'})),
    #    kwargs = {},
    #    name = 'redirect_C23002',
    #),
    #url(
    #    regex = '^tables/(?P<table>[a-zA-Z0-9]+)/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(TableDetailView.as_view()),
    #    kwargs = {},
    #    name = 'table_detail',
    #),
    #url(
    #    regex = '^tables/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(TableSearchView.as_view()),
    #    kwargs = {},
    #    name = 'table_search',
    #),
    url(
        regex = '^data/$',
        view = RedirectView.as_view(url=reverse_lazy('table_search')),
        kwargs = {},
        name = 'table_search_redirect',
    ),
    # e.g. /table/B01001/
    url(
        regex = '^data/(?P<format>%s)/$' % COMPARISON_FORMATS,
        view = cache_page(STANDARD_CACHE_TIME)(DataView.as_view()),
        kwargs = {},
        name = 'data_detail',
    ),
    #url(
    #    regex = '^topics/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(TopicView.as_view()),
    #    kwargs = {},
    #    name = 'topic_list',
    #),
    #url(
    #    regex = '^topics/race-latino/?$',
    #    view = RedirectView.as_view(url=reverse_lazy('topic_detail', kwargs={'topic_slug': 'race-hispanic'})),
    #    name = 'topic_latino_redirect',
    #),
    #url(
    #    regex = '^topics/(?P<topic_slug>[-\w]+)/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(TopicView.as_view()),
    #    kwargs = {},
    #    name = 'topic_detail',
    #),
    url(
        regex = '^examples/(?P<example_slug>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(ExampleView.as_view()),
        kwargs = {},
        name = 'example_detail',
    ),
    #url(
    #    regex = '^glossary/$',
    #    view = cache_page(STANDARD_CACHE_TIME)(TemplateView.as_view(template_name="glossary.html")),
    #    kwargs = {},
    #    name = 'glossary',
    #),
    url(
        regex = '^locate/$',
        view = cache_page(STANDARD_CACHE_TIME)(LocateView.as_view(template_name="locate/locate.html")),
        kwargs = {},
        name = 'locate',
    ),
    url(
        regex = '^healthcheck$',
        view = HealthcheckView.as_view(),
        kwargs = {},
        name = 'healthcheck',
    ),
    # robots.txt is generated inline; BLOCK_ROBOTS flips Allow/Disallow.
    url(
        regex = '^robots.txt$',
        view = lambda r: HttpResponse(
            "User-agent: *\n%s: /" % ('Disallow' if BLOCK_ROBOTS else 'Allow') ,
            mimetype="text/plain"
        )
    ),
    url(
        regex = '^place-search/json/$',
        view = PlaceSearchJson.as_view(),
        kwargs = {},
        name = 'place_search_json',
    ),
    url(
        regex = '^ward-search/json/$',
        view = WardSearchProxy.as_view(),
        kwargs = {},
        name = 'ward_search_json',
    ),
    ## LOCAL DEV VERSION OF API ##
    url(
        regex = '^geo-search/$',
        view = GeoSearch.as_view(),
        kwargs = {},
        name = 'geo_search',
    ),
    url(
        regex = '^elasticsearch/$',
        view = Elasticsearch.as_view(),
        kwargs = {},
        name = 'elasticsearch',
    ),
    ## END LOCAL DEV VERSION OF API ##
)
| mit |
rwl/openpowersystem | rdflib/sparql/bison/Query.py | 1 | 3614 | from rdflib.sparql.bison.GraphPattern import GraphPattern
class Query(object):
    """
    Pairs the parsed Prolog (BASE/PREFIX declarations) with the parsed
    query form (select / construct / describe / ask).

    Query ::= Prolog ( SelectQuery | ConstructQuery | DescribeQuery | AskQuery )
    See: http://www.w3.org/TR/rdf-sparql-query/#rQuery
    """

    def __init__(self, prolog, query):
        # Keep both halves of the parse tree; consumers walk them separately.
        self.prolog = prolog
        self.query = query

    def __repr__(self):
        # The inner query form carries the interesting representation.
        return repr(self.query)
class WhereClause(object):
    """
    Thin wrapper holding the ParsedGraphPattern instance produced for a
    WHERE clause.
    """

    def __init__(self, parsedGraphPattern):
        self.parsedGraphPattern = parsedGraphPattern
class SelectQuery(object):
    """
    SelectQuery ::= 'SELECT' 'DISTINCT'? ( Var+ | '*' ) DatasetClause* WhereClause SolutionModifier
    See: http://www.w3.org/TR/rdf-sparql-query/#rSelectQuery
    """

    def __init__(self, variables, dataSetList, whereClause, solutionModifier, distinct=None):
        # An empty / missing projection list means 'SELECT *'.
        self.variables = variables if variables else []
        self.dataSets = dataSetList if dataSetList else []
        self.whereClause = whereClause
        self.solutionModifier = solutionModifier
        # Any non-None marker passed by the parser means DISTINCT was present.
        self.distinct = distinct is not None

    def __repr__(self):
        return "SELECT %s %s %s %s %s" % (
            'DISTINCT' if self.distinct else '',
            self.variables if self.variables else '*',
            self.dataSets,
            self.whereClause.parsedGraphPattern,
            self.solutionModifier if self.solutionModifier else '',
        )
class AskQuery(object):
    """
    AskQuery ::= 'ASK' DatasetClause* WhereClause
    See: http://www.w3.org/TR/rdf-sparql-query/#rAskQuery
    """

    def __init__(self, dataSetList, whereClause):
        self.dataSets = dataSetList if dataSetList else []
        self.whereClause = whereClause

    def __repr__(self):
        return "ASK %s %s" % (self.dataSets, self.whereClause.parsedGraphPattern)
class ConstructQuery(object):
    """
    ConstructQuery ::= 'CONSTRUCT' ConstructTemplate DatasetClause* WhereClause SolutionModifier
    See: http://www.w3.org/TR/rdf-sparql-query/#rConstructQuery
    """

    def __init__(self, triples, dataSetList, whereClause, solutionModifier):
        # The template triples are wrapped in a GraphPattern so downstream
        # code can treat them uniformly with WHERE-clause patterns.
        self.triples = GraphPattern(triples=triples)
        self.dataSets = dataSetList if dataSetList else []
        self.whereClause = whereClause
        self.solutionModifier = solutionModifier
class DescribeQuery(object):
    """
    DescribeQuery ::= 'DESCRIBE' ( VarOrIRIref+ | '*' ) DatasetClause* WhereClause? SolutionModifier
    See: http://www.w3.org/TR/rdf-sparql-query/#rConstructQuery
    """

    def __init__(self, variables, dataSetList, whereClause, solutionModifier):
        # An empty / missing list of vars-or-IRIs means 'DESCRIBE *'.
        self.describeVars = variables if variables else []
        self.dataSets = dataSetList if dataSetList else []
        self.whereClause = whereClause
        self.solutionModifier = solutionModifier

    def __repr__(self):
        return "DESCRIBE %s %s %s %s" % (
            self.describeVars,
            self.dataSets,
            self.whereClause.parsedGraphPattern,
            self.solutionModifier)
class Prolog(object):
    """
    Prolog ::= BaseDecl? PrefixDecl*
    See: http://www.w3.org/TR/rdf-sparql-query/#rProlog
    """

    def __init__(self, baseDeclaration, prefixDeclarations):
        self.baseDeclaration = baseDeclaration
        self.extensionFunctions = {}
        self.prefixBindings = {}
        # Collect qName -> base IRI bindings; entries without a .base are
        # skipped (only fully-formed PREFIX declarations carry one).
        for prefixBind in (prefixDeclarations or []):
            if hasattr(prefixBind, 'base'):
                self.prefixBindings[prefixBind.qName] = prefixBind.base

    def __repr__(self):
        return repr(self.prefixBindings)
| agpl-3.0 |
ioana-delaney/spark | examples/src/main/python/ml/vector_indexer_example.py | 123 | 1685 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import VectorIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Start (or reuse) a SparkSession for this self-contained example.
    spark = SparkSession \
        .builder \
        .appName("VectorIndexerExample") \
        .getOrCreate()

    # $example on$
    dataset = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    vector_indexer = VectorIndexer(inputCol="features", outputCol="indexed", maxCategories=10)
    model = vector_indexer.fit(dataset)

    categorical_features = model.categoryMaps
    print("Chose %d categorical features: %s" %
          (len(categorical_features), ", ".join(str(k) for k in categorical_features.keys())))

    # Create new column "indexed" with categorical values transformed to indices
    indexed_dataset = model.transform(dataset)
    indexed_dataset.show()
    # $example off$

    spark.stop()
| apache-2.0 |
treetrnk/Tuxemon | tests/interactive/test_combat.py | 6 | 2183 | """
from combat.py
"""
if __name__ == "__main__":
    print("Runs as standalone")

    # NOTE(review): this block references pygame, sys, prepare and COMBAT but
    # the visible file only imports core.components.config below -- presumably
    # the missing imports were stripped or are expected from a star-import;
    # confirm, otherwise running standalone raises NameError.
    from core.components import config

    class Game(object):
        """Minimal standalone harness: boots pygame, builds a COMBAT instance
        and runs its event/draw loop forever (exits via window close or ESC)."""

        def __init__(self):
            # set up pygame
            pygame.init()
            # read the configuration file
            self.config = config.Config()

            # The game resolution
            self.resolution = self.config.resolution

            # set up the window
            self.screen = pygame.display.set_mode(self.resolution, self.config.fullscreen, 32)
            pygame.display.set_caption('Tuxemon Combat System')

            # Create a clock object that will keep track of how much time has passed since the last frame
            self.clock = pygame.time.Clock()

            # Set the font used for the FPS counter and other debug text
            self.font = pygame.font.Font(prepare.BASEDIR + "resources/font/PressStart2P.ttf", 14)

            # Native resolution is similar to the old gameboy resolution. This is used for scaling.
            self.native_resolution = [240, 160]

            # If scaling is enabled, set the scaling based on resolution
            if self.config.scaling == "1":
                self.scale = int((self.resolution[0] / self.native_resolution[0]))
            else:
                self.scale = 1

            self.combat = COMBAT(self)

            # Main loop: never returns; sys.exit() is the only way out.
            while True:
                self.clock.tick()

                self.events = pygame.event.get()
                for event in self.events:
                    if event.type == pygame.QUIT:
                        pygame.quit()
                        sys.exit()

                    # Exit the game if you press ESC
                    if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                        pygame.quit()
                        sys.exit()

                self.combat.draw()
                self.combat.handle_events(self)

                # Calculate the FPS and print it onscreen for debugging purposes
                # NOTE(review): pygame.font.Font has no .draw(); presumably
                # .render() was meant -- confirm before relying on this path.
                fps = self.font.draw("FPS: " + str(self.clock.get_fps()), 1, (240, 240, 240))
                self.screen.blit(fps, (10, 10))

                pygame.display.flip()

    Game()
| gpl-3.0 |
songmonit/CTTMSONLINE | addons/l10n_fr/l10n_fr.py | 336 | 2089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_fr_report(osv.osv):
    # Declarative OpenERP model: a named French fiscal report whose lines
    # live in the companion l10n.fr.line model (one2many below).
    _name = 'l10n.fr.report'
    _description = 'Report for l10n_fr'
    _columns = {
        'code': fields.char('Code', size=64),
        'name': fields.char('Name'),
        'line_ids': fields.one2many('l10n.fr.line', 'report_id', 'Lines', copy=True),
    }
    _sql_constraints = [
        # DB-level guard: report codes are unique lookup keys.
        ('code_uniq', 'unique (code)','The code report must be unique !')
    ]
class l10n_fr_line(osv.osv):
    # One variable/line belonging to an l10n.fr.report (see many2one below).
    _name = 'l10n.fr.line'
    _description = 'Report Lines for l10n_fr'
    _columns = {
        'code': fields.char('Variable Name', size=64),
        'definition': fields.char('Definition'),
        'name': fields.char('Name'),
        'report_id': fields.many2one('l10n.fr.report', 'Report'),
    }
    _sql_constraints = [
        # DB-level uniqueness for the variable name.
        ('code_uniq', 'unique (code)', 'The variable name must be unique !')
    ]
class res_company(osv.osv):
    # Extend the base company model with French legal identifiers.
    _inherit = 'res.company'
    _columns = {
        'siret': fields.char('SIRET', size=14),  # French establishment id (14 chars)
        'ape': fields.char('APE'),  # French activity code
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nesdis/djongo | tests/django_tests/tests/v22/tests/model_fields/test_integerfield.py | 44 | 7324 | import unittest
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models
from django.test import SimpleTestCase, TestCase
from .models import (
BigIntegerModel, IntegerModel, PositiveIntegerModel,
PositiveSmallIntegerModel, SmallIntegerModel,
)
class IntegerFieldTests(TestCase):
    """Shared suite for Django's integer field types; subclasses override
    `model` and `documented_range` to cover each sized variant."""

    # Model under test and the range documented for this field type.
    model = IntegerModel
    documented_range = (-2147483648, 2147483647)

    @property
    def backend_range(self):
        # (min, max) that the active database backend enforces for this
        # field's internal type; a None bound means "not enforced by the
        # backend" (see the guards in the tests below).
        field = self.model._meta.get_field('value')
        internal_type = field.get_internal_type()
        return connection.ops.integer_field_range(internal_type)

    def test_documented_range(self):
        """
        Values within the documented safe range pass validation, and can be
        saved and retrieved without corruption.
        """
        min_value, max_value = self.documented_range

        instance = self.model(value=min_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__lte=min_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, min_value)

        instance = self.model(value=max_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__gte=max_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, max_value)

    def test_backend_range_save(self):
        """
        Backend specific ranges can be saved without corruption.
        """
        min_value, max_value = self.backend_range

        if min_value is not None:
            instance = self.model(value=min_value)
            instance.full_clean()
            instance.save()
            qs = self.model.objects.filter(value__lte=min_value)
            self.assertEqual(qs.count(), 1)
            self.assertEqual(qs[0].value, min_value)

        if max_value is not None:
            instance = self.model(value=max_value)
            instance.full_clean()
            instance.save()
            qs = self.model.objects.filter(value__gte=max_value)
            self.assertEqual(qs.count(), 1)
            self.assertEqual(qs[0].value, max_value)

    def test_backend_range_validation(self):
        """
        Backend specific ranges are enforced at the model validation level
        (#12030).
        """
        min_value, max_value = self.backend_range

        if min_value is not None:
            # One below the bound must fail with the validator's own message.
            instance = self.model(value=min_value - 1)
            expected_message = validators.MinValueValidator.message % {
                'limit_value': min_value,
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            # The boundary value itself must validate cleanly.
            instance.value = min_value
            instance.full_clean()

        if max_value is not None:
            instance = self.model(value=max_value + 1)
            expected_message = validators.MaxValueValidator.message % {
                'limit_value': max_value,
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            instance.value = max_value
            instance.full_clean()

    def test_redundant_backend_range_validators(self):
        """
        If there are stricter validators than the ones from the database
        backend then the backend validators aren't added.
        """
        min_backend_value, max_backend_value = self.backend_range

        if min_backend_value is not None:
            min_custom_value = min_backend_value + 1
            # Build a fresh field of the same class with a stricter validator.
            ranged_value_field = self.model._meta.get_field('value').__class__(
                validators=[validators.MinValueValidator(min_custom_value)]
            )
            field_range_message = validators.MinValueValidator.message % {
                'limit_value': min_custom_value,
            }
            # The custom (stricter) message must be the one raised.
            with self.assertRaisesMessage(ValidationError, "[%r]" % field_range_message):
                ranged_value_field.run_validators(min_backend_value - 1)

        if max_backend_value is not None:
            max_custom_value = max_backend_value - 1
            ranged_value_field = self.model._meta.get_field('value').__class__(
                validators=[validators.MaxValueValidator(max_custom_value)]
            )
            field_range_message = validators.MaxValueValidator.message % {
                'limit_value': max_custom_value,
            }
            with self.assertRaisesMessage(ValidationError, "[%r]" % field_range_message):
                ranged_value_field.run_validators(max_backend_value + 1)

    def test_types(self):
        # The Python type stays int before save, after save and on reload.
        instance = self.model(value=0)
        self.assertIsInstance(instance.value, int)
        instance.save()
        self.assertIsInstance(instance.value, int)
        instance = self.model.objects.get()
        self.assertIsInstance(instance.value, int)

    def test_coercing(self):
        # Numeric strings are coerced to int both on create and on lookup.
        self.model.objects.create(value='10')
        instance = self.model.objects.get(value='10')
        self.assertEqual(instance.value, 10)
class SmallIntegerFieldTests(IntegerFieldTests):
    # Reuses the shared suite with SmallIntegerField's documented 16-bit range.
    model = SmallIntegerModel
    documented_range = (-32768, 32767)
class BigIntegerFieldTests(IntegerFieldTests):
    # Reuses the shared suite with BigIntegerField's documented 64-bit range.
    model = BigIntegerModel
    documented_range = (-9223372036854775808, 9223372036854775807)
class PositiveSmallIntegerFieldTests(IntegerFieldTests):
    # Unsigned variant: documented range starts at zero.
    model = PositiveSmallIntegerModel
    documented_range = (0, 32767)
class PositiveIntegerFieldTests(IntegerFieldTests):
    # Unsigned variant of IntegerField: documented range starts at zero.
    model = PositiveIntegerModel
    documented_range = (0, 2147483647)

    @unittest.skipIf(connection.vendor == 'sqlite', "SQLite doesn't have a constraint.")
    def test_negative_values(self):
        # Driving the stored value below zero via an F() expression must be
        # rejected by a database-level constraint (hence the sqlite skip).
        p = PositiveIntegerModel.objects.create(value=0)
        p.value = models.F('value') - 1
        with self.assertRaises(IntegrityError):
            p.save()
class ValidationTests(SimpleTestCase):
    """Field-level clean() behaviour for IntegerField; no database needed."""

    def test_integerfield_cleans_valid_string(self):
        f = models.IntegerField()
        self.assertEqual(f.clean('2', None), 2)

    # (sic: "intput" typo kept -- renaming would change the test id)
    def test_integerfield_raises_error_on_invalid_intput(self):
        f = models.IntegerField()
        with self.assertRaises(ValidationError):
            f.clean('a', None)

    def test_choices_validation_supports_named_groups(self):
        # Choices may be grouped: ('group', ((value, label), ...)).
        f = models.IntegerField(choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
        self.assertEqual(10, f.clean(10, None))

    def test_nullable_integerfield_raises_error_with_blank_false(self):
        # null=True permits NULL at the DB layer, but blank=False still
        # rejects None during validation.
        f = models.IntegerField(null=True, blank=False)
        with self.assertRaises(ValidationError):
            f.clean(None, None)

    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        f = models.IntegerField(null=True, blank=True)
        self.assertIsNone(f.clean(None, None))

    def test_integerfield_raises_error_on_empty_input(self):
        f = models.IntegerField(null=False)
        with self.assertRaises(ValidationError):
            f.clean(None, None)
        with self.assertRaises(ValidationError):
            f.clean('', None)

    def test_integerfield_validates_zero_against_choices(self):
        # '0' is a valid integer but not among the declared choices.
        f = models.IntegerField(choices=((1, 1),))
        with self.assertRaises(ValidationError):
            f.clean('0', None)
abdullah2891/remo | vendor-local/lib/python/docutils/languages/ru.py | 20 | 3265 | # $Id: ru.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Roman Suzi <rnd@onego.ru>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Keys are Docutils node class names; values are the Russian UI strings,
# kept as \u escapes so the module stays pure ASCII.
labels = {
    u'abstract': u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f',
    u'address': u'\u0410\u0434\u0440\u0435\u0441',
    u'attention': u'\u0412\u043d\u0438\u043c\u0430\u043d\u0438\u0435!',
    u'author': u'\u0410\u0432\u0442\u043e\u0440',
    u'authors': u'\u0410\u0432\u0442\u043e\u0440\u044b',
    u'caution': u'\u041e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e!',
    u'contact': u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442',
    u'contents':
    u'\u0421\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435',
    u'copyright': u'\u041f\u0440\u0430\u0432\u0430 '
    u'\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f',
    u'danger': u'\u041e\u041f\u0410\u0421\u041d\u041e!',
    u'date': u'\u0414\u0430\u0442\u0430',
    u'dedication':
    u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435',
    u'error': u'\u041e\u0448\u0438\u0431\u043a\u0430',
    u'hint': u'\u0421\u043e\u0432\u0435\u0442',
    u'important': u'\u0412\u0430\u0436\u043d\u043e',
    u'note': u'\u041f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435',
    u'organization':
    u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f',
    u'revision': u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f',
    u'status': u'\u0421\u0442\u0430\u0442\u0443\u0441',
    u'tip': u'\u041f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430',
    u'version': u'\u0412\u0435\u0440\u0441\u0438\u044f',
    u'warning': u'\u041f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436'
    u'\u0434\u0435\u043d\u0438\u0435'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'\u0430\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f': u'abstract',
u'\u0430\u0434\u0440\u0435\u0441': u'address',
u'\u0430\u0432\u0442\u043e\u0440': u'author',
u'\u0430\u0432\u0442\u043e\u0440\u044b': u'authors',
u'\u043a\u043e\u043d\u0442\u0430\u043a\u0442': u'contact',
u'\u043f\u0440\u0430\u0432\u0430 \u043a\u043e\u043f\u0438\u0440\u043e'
u'\u0432\u0430\u043d\u0438\u044f': u'copyright',
u'\u0434\u0430\u0442\u0430': u'date',
u'\u043f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435':
u'dedication',
u'\u043e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f':
u'organization',
u'\u0440\u0435\u0434\u0430\u043a\u0446\u0438\u044f': u'revision',
u'\u0441\u0442\u0430\u0442\u0443\u0441': u'status',
u'\u0432\u0435\u0440\u0441\u0438\u044f': u'version'}
"""Russian (lowcased) to canonical name mapping for bibliographic fields."""
# ';' is tried before ',' when splitting the Authors field.
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| bsd-3-clause |
jolevq/odoopub | addons/l10n_lu/__init__.py | 376 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zepheira/rdf-to-ejson | lib/rdf_to_ejson.py | 1 | 5385 | import simplejson
import httplib2
import rdflib
import hashlib
import sys, os
__version__ = "0.2"
_RDF_TYPE = u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
_RDFS_LABEL = "label" # "http://www.w3.org/2000/01/rdf-schema#label"
_SKOS_PREFLABEL = "prefLabel" # "http://www.w3.org/2004/02/skos/core#prefLabel"
# Converts rdflib URIRefs to Exhibit property names. Assumes unique names.
# NOTE that for consistent naming, an rdflib Graph needs to return the
# triples in a consistent order, otherwise the "winner" (who gets to use
# the unhyphenated short name) will vary between runs. Not sure what
# rdflib guarantees here but works so far.
PROP_TRANSLATE = lambda x: os.path.basename(x.rstrip("/")).rpartition("#")[2]
# Track the properties and types for inclusion in output
_EXHIBIT_PROPS = {}
_EXHIBIT_TYPES = {}
# Example label builder for an "Employee" resource type
EXAMPLE_LABEL_BUILDER = {
"Employee": lambda r: " ".join((r.get("lastName"),r.get("firstName")))
}
# Mapping from XSD type to Exhibit valueType. Augment as required
DEFAULT_LITERAL_TYPE_MAP = {
"http://www.w3.org/2001/XMLSchema#decimal": "number",
"http://www.w3.org/2001/XMLSchema#integer": "number",
"http://www.w3.org/2001/XMLSchema#boolean": "boolean",
"http://www.w3.org/2001/XMLSchema#dateTime": "date",
"http://www.w3.org/2001/XMLSchema#string": "text",
}
def _add_property(_p, p, _o, used, is_item=False):
    """
    Register property short name `p` (for full URI `_p`) in the module-level
    _EXHIBIT_PROPS dict, and return the short name actually used.

    Arguments:
    _p -- the rdflib predicate (full URI)
    p -- the candidate Exhibit short name for the property
    _o -- the triple's object; its datatype (if any) selects the valueType
    used -- dict of short name -> URI that already claimed the name
    is_item -- True when the object is itself a resource (Exhibit "item")
    """
    # Same short name already claimed by a *different* URI: disambiguate
    # deterministically with a hash suffix.
    if p in used and used[p] != unicode(_p):
        p = _rename_property(p, unicode(_p))
    prop = {
        "uri": unicode(_p)
    }
    if is_item:
        prop["valueType"] = "item"
    elif hasattr(_o, "datatype"):
        # Literal object: map its XSD datatype to an Exhibit valueType,
        # silently omitting valueType for unmapped datatypes.
        vt = DEFAULT_LITERAL_TYPE_MAP.get(str(_o.datatype))
        if vt:
            prop["valueType"] = vt
    used[p] = unicode(_p)
    _EXHIBIT_PROPS[p] = prop
    return p
def _add_type(_p, p, used):
    """
    Register type short name `p` (for full URI `_p`) in the module-level
    _EXHIBIT_TYPES dict, renaming on collision with a different URI.
    Mirrors _add_property, minus the valueType handling.

    Returns the (possibly renamed) short name actually registered.
    """
    if p in used and used[p] != unicode(_p):
        # Name clash with a different URI: add a deterministic hash suffix.
        p = _rename_property(p, unicode(_p))
    prop = {
        "uri": unicode(_p)
    }
    used[p] = unicode(_p)
    _EXHIBIT_TYPES[p] = prop
    return p
def _rename_property(p,_p):
"""
Return a new short name for p based on the original p as well as
part of a hash of the full URI of the property (for repeatability)
"""
return "%s-%s"%(p,hashlib.md5(_p).hexdigest().upper()[:4])
def convert(graph,label_builder={},literal_type_map=DEFAULT_LITERAL_TYPE_MAP):
    """
    Convert RDF into Exhibit JSON... well actually just a Python data structure
    that can be serialized to JSON, in case you need to manipulate it prior to
    that step.

    Arguments:
    graph -- the source, an rdflib.Graph instance
    label_builder -- dict mapping a type short name to a function returning the label
      given the resource as an argument. See EXAMPLE_LABEL_BUILDER above.
    literal_type_map -- dict mapping from any source literal type to an Exhibit valueType

    NOTE(review): the mutable default for `label_builder` is only safe because
    it is never mutated here (read via .get()); do not start writing to it.
    NOTE(review): `literal_type_map` is accepted but unused -- _add_property
    reads the module-level DEFAULT_LITERAL_TYPE_MAP directly; confirm intent.
    NOTE(review): `.next()` and `unicode` are Python 2 only.
    """
    used_props = {}
    used_types = {}
    # Build resources in a dict keyed by their short name ...
    keyed_resources = {}
    for (_s,_p,_o) in graph:
        (s,p,o) = PROP_TRANSLATE(unicode(_s)), PROP_TRANSLATE(unicode(_p)), unicode(_o)
        # Looks up whether the object is used as a subject in the graph and so
        # needs to be typed as an Exhibit item
        try:
            graph.triples((_o,None,None)).next()
            is_item = True
        except StopIteration:
            is_item = False
        p = _add_property(_p,p,_o,used_props,is_item)
        if s not in keyed_resources:
            keyed_resources[s] = { "id": s }
        new_o = PROP_TRANSLATE(o)
        if unicode(_p) == _RDF_TYPE:
            # rdf:type objects become Exhibit types (renamed on name clash).
            new_o = _add_type(o,new_o,used_types)
        # Check for existing properties of this resource, and if a dup,
        # create or update value as a list
        if p in keyed_resources[s]:
            if isinstance(keyed_resources[s][p],list):
                keyed_resources[s][p].append(new_o)
            else:
                keyed_resources[s][p] = [keyed_resources[s][p], new_o]
        else:
            keyed_resources[s][p] = new_o
    # Second pass for label extraction
    resources = keyed_resources.values()
    for r in resources:
        # FIXME use of short name as key makes label detection ambiguous.
        # May want to match on the full property URI instead.
        if "label" in r:
            label = r["label"]
        else:
            label = None
        rtype = r.get("type")
        if rtype:
            if isinstance(rtype,list):
                rtype = rtype[0] # FIXME pick first non-null label?
            label_func = label_builder.get(rtype)
            if label_func:
                label = label_func(r)
        if isinstance(label,list): label = label[0]
        # Fall back to the resource id when no label could be derived.
        r["label"] = label or r["id"]
    return {"items": resources,
        "types": _EXHIBIT_TYPES,
        "properties": _EXHIBIT_PROPS}
if __name__ == "__main__":
    # CLI usage: python rdf_to_ejson.py <rdf-source> <rdflib-format>
    # e.g.:      python rdf_to_ejson.py data.rdf xml
    graph = rdflib.Graph()
    graph.load(sys.argv[1],format=sys.argv[2])
    exhibit = convert(graph)
    print simplejson.dumps(exhibit,indent=4)
| apache-2.0 |
mcgachey/edx-platform | lms/djangoapps/course_api/blocks/transformers/tests/test_block_counts.py | 32 | 2042 | """
Tests for BlockCountsTransformer.
"""
# pylint: disable=protected-access
from openedx.core.lib.block_cache.block_structure_factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..block_counts import BlockCountsTransformer
class TestBlockCountsTransformer(ModuleStoreTestCase):
    """
    Test behavior of BlockCountsTransformer
    """
    def setUp(self):
        super(TestBlockCountsTransformer, self).setUp()
        # Build a block structure from the standard sample course fixture.
        self.course_key = SampleCourseFactory.create().id
        self.course_usage_key = self.store.make_course_usage_key(self.course_key)
        self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)

    def test_transform(self):
        # collect phase
        BlockCountsTransformer.collect(self.block_structure)
        self.block_structure._collect_requested_xblock_fields()

        # transform phase: only 'problem' and 'chapter' are requested, so only
        # those block types should appear in the resulting counts.
        BlockCountsTransformer(['problem', 'chapter']).transform(usage_info=None, block_structure=self.block_structure)

        # block_counts
        chapter_x_key = self.course_key.make_usage_key('chapter', 'chapter_x')
        block_counts_for_chapter_x = self.block_structure.get_transformer_block_data(
            chapter_x_key, BlockCountsTransformer,
        )
        block_counts_for_course = self.block_structure.get_transformer_block_data(
            self.course_usage_key, BlockCountsTransformer,
        )

        # verify count of chapters (expected values reflect the
        # SampleCourseFactory fixture layout)
        self.assertEquals(block_counts_for_course['chapter'], 2)

        # verify count of problems
        self.assertEquals(block_counts_for_course['problem'], 6)
        self.assertEquals(block_counts_for_chapter_x['problem'], 3)

        # verify other block types are not counted
        for block_type in ['course', 'html', 'video']:
            self.assertNotIn(block_type, block_counts_for_course)
            self.assertNotIn(block_type, block_counts_for_chapter_x)
| agpl-3.0 |
thefinn93/CouchPotatoServer | libs/flask/blueprints.py | 50 | 14240 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
    """Temporary holder object for registering a blueprint with the
    application.  Created by :meth:`~flask.Blueprint.make_setup_state`
    and handed to every deferred registration callback.
    """

    def __init__(self, blueprint, app, options, first_registration):
        #: a reference to the current application
        self.app = app

        #: a reference to the blueprint that created this setup state.
        self.blueprint = blueprint

        #: a dictionary with all options that were passed to the
        #: :meth:`~flask.Flask.register_blueprint` method.
        self.options = options

        #: whether this is the first time the blueprint is registered on
        #: this application (blueprints can be registered multiple times
        #: and not everything wants to run twice).
        self.first_registration = first_registration

        # register_blueprint() options win over the blueprint's own values.
        subdomain = options.get('subdomain')
        #: The subdomain that the blueprint should be active for, `None`
        #: otherwise.
        self.subdomain = blueprint.subdomain if subdomain is None else subdomain

        url_prefix = options.get('url_prefix')
        #: The prefix that should be used for all URLs defined on the
        #: blueprint.
        self.url_prefix = blueprint.url_prefix if url_prefix is None else url_prefix

        #: A dictionary with URL defaults that is added to each and every
        #: URL that was defined with the blueprint.
        self.url_defaults = dict(blueprint.url_values_defaults)
        self.url_defaults.update(options.get('url_defaults', ()))

    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Register a rule (and optionally a view function) on the
        application, folding in the blueprint-wide URL prefix, subdomain
        and URL defaults.  The endpoint is automatically prefixed with
        the blueprint's name.
        """
        if self.url_prefix:
            rule = self.url_prefix + rule
        options.setdefault('subdomain', self.subdomain)
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func)
        defaults = self.url_defaults
        if 'defaults' in options:
            # Per-rule defaults are layered on top of the blueprint-wide ones.
            defaults = dict(defaults, **options.pop('defaults'))
        self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
                              view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
             static_url_path=None, template_folder=None,
             url_prefix=None, subdomain=None, url_defaults=None):
    # Template/static resolution is handled by the package-bound base class.
    _PackageBoundObject.__init__(self, import_name, template_folder)
    self.name = name
    self.url_prefix = url_prefix
    self.subdomain = subdomain
    self.static_folder = static_folder
    self.static_url_path = static_url_path
    # Deferred callbacks, each invoked with a BlueprintSetupState when the
    # blueprint is registered on an application (see record()/register()).
    self.deferred_functions = []
    self.view_functions = {}
    # Avoid the shared-mutable-default pitfall: each instance gets its own
    # dict when the caller passes nothing.
    if url_defaults is None:
        url_defaults = {}
    self.url_values_defaults = url_defaults
def record(self, func):
    """Registers a function that is called when the blueprint is
    registered on the application.  The function receives the
    :class:`~flask.blueprints.BlueprintSetupState` produced by
    :meth:`make_setup_state` as its only argument.
    """
    modified_after_registration = (self._got_registered_once and
                                   self.warn_on_modifications)
    if modified_after_registration:
        from warnings import warn
        warn(Warning('The blueprint was already registered once '
                     'but is getting modified now. These changes '
                     'will not show up.'))
    self.deferred_functions.append(func)
def record_once(self, func):
    """Like :meth:`record` but the deferred function only fires on the
    blueprint's first registration; subsequent registrations skip it.
    """
    def once_wrapper(state):
        if not state.first_registration:
            return
        func(state)
    # update_wrapper keeps func's metadata on the deferred callable.
    return self.record(update_wrapper(once_wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
    """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
    object that is later passed to the register callback functions.
    Subclasses can override this to return a subclass of the setup state.

    :param app: the application the blueprint is being registered on.
    :param options: keyword options forwarded from
                    :meth:`~flask.Flask.register_blueprint`.
    :param first_registration: whether this is the blueprint's first
                               registration on this application.
    """
    return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
    """Called by :meth:`Flask.register_blueprint` to register a blueprint
    on the application.  This can be overridden to customize the register
    behavior.  Keyword arguments from
    :func:`~flask.Flask.register_blueprint` are directly forwarded to this
    method in the `options` dictionary.
    """
    self._got_registered_once = True
    state = self.make_setup_state(app, options, first_registration)
    # The static route (if any) is registered directly, before replaying
    # the deferred callbacks recorded via record()/record_once().
    if self.has_static_folder:
        state.add_url_rule(self.static_url_path + '/<path:filename>',
                           view_func=self.send_static_file,
                           endpoint='static')
    for deferred in self.deferred_functions:
        deferred(state)
def route(self, rule, **options):
    """Like :meth:`Flask.route` but for a blueprint.  The endpoint for the
    :func:`url_for` function is prefixed with the name of the blueprint.

    :param rule: the URL rule as string.
    :param options: forwarded to :meth:`add_url_rule`; an ``endpoint``
        option overrides the default (the view function's name).
    """
    def decorator(view_func):
        # Default the endpoint to the decorated function's name.
        name = options.pop("endpoint", view_func.__name__)
        self.add_url_rule(rule, name, view_func, **options)
        return view_func
    return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    """Like :meth:`Flask.add_url_rule` but for a blueprint.  The endpoint for
    the :func:`url_for` function is prefixed with the name of the blueprint.

    :param rule: the URL rule as string.
    :param endpoint: the endpoint for the rule; defaults to the view
        function's name when omitted.
    :param view_func: the function to call when serving a request for
        this endpoint.
    """
    if endpoint:
        # A dot is the separator between blueprint name and endpoint, so a
        # blueprint-local endpoint must not contain one itself.
        # (Message grammar fixed; previously: "Blueprint endpoint's should
        # not contain dot's".)
        assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
    # Defer the actual registration until the blueprint is registered on
    # an application; the setup state prefixes the endpoint appropriately.
    self.record(lambda s:
                s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
    """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
    prefix the endpoint with the blueprint name, this has to be done
    explicitly by the user of this method.  If the endpoint is prefixed
    with a `.` it will be registered to the current blueprint, otherwise
    it's an application independent endpoint.

    :param endpoint: the name under which the view is registered.
    """
    def decorator(f):
        def register_endpoint(state):
            # Attach the view directly on the application's view function
            # table; no blueprint-name prefixing happens here.
            state.app.view_functions[endpoint] = f
        self.record_once(register_endpoint)
        return f
    return decorator
def app_template_filter(self, name=None):
    """Register a custom template filter, available application wide.  Like
    :meth:`Flask.template_filter` but for a blueprint.

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    def decorator(f):
        self.add_app_template_filter(f, name=name)
        return f
    return decorator

def add_app_template_filter(self, f, name=None):
    """Register a custom template filter, available application wide.  Like
    :meth:`Flask.add_template_filter` but for a blueprint.  Works exactly
    like the :meth:`app_template_filter` decorator.

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    def register_template(state):
        # Filters live on the application's global Jinja environment,
        # hence there is no blueprint-name prefixing.
        state.app.jinja_env.filters[name or f.__name__] = f
    self.record_once(register_template)
def before_request(self, f):
    """Like :meth:`Flask.before_request` but for a blueprint.  This function
    is only executed before each request that is handled by a function of
    that blueprint.
    """
    # Blueprint-scoped hooks are keyed by the blueprint's name.
    self.record_once(lambda s: s.app.before_request_funcs
        .setdefault(self.name, []).append(f))
    return f

def before_app_request(self, f):
    """Like :meth:`Flask.before_request`.  Such a function is executed
    before each request, even if outside of a blueprint.
    """
    # The ``None`` key holds application-wide hooks.
    self.record_once(lambda s: s.app.before_request_funcs
        .setdefault(None, []).append(f))
    return f

def before_app_first_request(self, f):
    """Like :meth:`Flask.before_first_request`.  Such a function is
    executed before the first request to the application.
    """
    self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
    return f

def after_request(self, f):
    """Like :meth:`Flask.after_request` but for a blueprint.  This function
    is only executed after each request that is handled by a function of
    that blueprint.
    """
    self.record_once(lambda s: s.app.after_request_funcs
        .setdefault(self.name, []).append(f))
    return f

def after_app_request(self, f):
    """Like :meth:`Flask.after_request` but for a blueprint.  Such a function
    is executed after each request, even if outside of the blueprint.
    """
    self.record_once(lambda s: s.app.after_request_funcs
        .setdefault(None, []).append(f))
    return f

def teardown_request(self, f):
    """Like :meth:`Flask.teardown_request` but for a blueprint.  This
    function is only executed when tearing down requests handled by a
    function of that blueprint.  Teardown request functions are executed
    when the request context is popped, even when no actual request was
    performed.
    """
    self.record_once(lambda s: s.app.teardown_request_funcs
        .setdefault(self.name, []).append(f))
    return f

def teardown_app_request(self, f):
    """Like :meth:`Flask.teardown_request` but for a blueprint.  Such a
    function is executed when tearing down each request, even if outside of
    the blueprint.
    """
    self.record_once(lambda s: s.app.teardown_request_funcs
        .setdefault(None, []).append(f))
    return f
def context_processor(self, f):
    """Like :meth:`Flask.context_processor` but for a blueprint.  This
    function is only executed for requests handled by a blueprint.
    """
    self.record_once(lambda s: s.app.template_context_processors
        .setdefault(self.name, []).append(f))
    return f

def app_context_processor(self, f):
    """Like :meth:`Flask.context_processor` but for a blueprint.  Such a
    function is executed each request, even if outside of the blueprint.
    """
    # ``None`` key == application-wide context processors.
    self.record_once(lambda s: s.app.template_context_processors
        .setdefault(None, []).append(f))
    return f

def app_errorhandler(self, code):
    """Like :meth:`Flask.errorhandler` but for a blueprint.  This
    handler is used for all requests, even if outside of the blueprint.

    :param code: HTTP status code or exception class to handle.
    """
    def decorator(f):
        self.record_once(lambda s: s.app.errorhandler(code)(f))
        return f
    return decorator
def url_value_preprocessor(self, f):
    """Registers a function as URL value preprocessor for this
    blueprint.  It's called before the view functions are called and
    can modify the url values provided.
    """
    self.record_once(lambda s: s.app.url_value_preprocessors
        .setdefault(self.name, []).append(f))
    return f

def url_defaults(self, f):
    """Callback function for URL defaults for this blueprint.  It's called
    with the endpoint and values and should update the values passed
    in place.
    """
    self.record_once(lambda s: s.app.url_default_functions
        .setdefault(self.name, []).append(f))
    return f

def app_url_value_preprocessor(self, f):
    """Same as :meth:`url_value_preprocessor` but application wide.
    """
    # Registered under the ``None`` key, i.e. for every request.
    self.record_once(lambda s: s.app.url_value_preprocessors
        .setdefault(None, []).append(f))
    return f

def app_url_defaults(self, f):
    """Same as :meth:`url_defaults` but application wide.
    """
    self.record_once(lambda s: s.app.url_default_functions
        .setdefault(None, []).append(f))
    return f
def errorhandler(self, code_or_exception):
    """Registers an error handler that becomes active for this blueprint
    only.  Please be aware that routing does not happen local to a
    blueprint so an error handler for 404 usually is not handled by
    a blueprint unless it is caused inside a view function.  Another
    special case is the 500 internal server error which is always looked
    up from the application.

    Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
    of the :class:`~flask.Flask` object.

    :param code_or_exception: HTTP status code or exception class.
    """
    def decorator(f):
        # Registration is scoped to this blueprint through self.name.
        self.record_once(lambda s: s.app._register_error_handler(
            self.name, code_or_exception, f))
        return f
    return decorator
| gpl-3.0 |
ndtrung81/lammps | python/lammps/data.py | 2 | 3145 | # ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
################################################################################
# LAMMPS data structures
# Written by Richard Berger <richard.berger@temple.edu>
################################################################################
class NeighList:
    """Read-only, list-like view onto a LAMMPS neighbor list.

    Each element is a tuple of:

    * the atom local index,
    * its number of neighbors,
    * and a pointer to a c_int array holding the local atom indices of
      its neighbors.

    Internally it delegates to the lower-level LAMMPS C-library interface.

    :param lmp: reference to instance of :py:class:`lammps`
    :type lmp: lammps
    :param idx: neighbor list index
    :type idx: int
    """

    def __init__(self, lmp, idx):
        self.lmp = lmp
        self.idx = idx

    def __str__(self):
        return "Neighbor List ({} atoms)".format(self.size)

    def __repr__(self):
        return self.__str__()

    @property
    def size(self):
        """
        :return: number of elements in neighbor list
        """
        return self.lmp.get_neighlist_size(self.idx)

    def get(self, element):
        """
        Access a specific neighbor list entry. "element" must be a number from 0 to the size-1 of the list

        :return: tuple with atom local index, number of neighbors and ctypes pointer to neighbor's local atom indices
        :rtype: (int, int, ctypes.POINTER(c_int))
        """
        return self.lmp.get_neighlist_element_neighbors(self.idx, element)

    # --- iterator/sequence protocol so NeighList behaves like a list ---

    def __getitem__(self, element):
        return self.get(element)

    def __len__(self):
        return self.size

    def __iter__(self):
        for position in range(self.size):
            yield self.get(position)

    def find(self, iatom):
        """
        Find the neighbor list for a specific (local) atom iatom.
        If there is no list for iatom, (-1, None) is returned.

        :return: tuple with number of neighbors and ctypes pointer to neighbor's local atom indices
        :rtype: (int, ctypes.POINTER(c_int))
        """
        for local_idx, num_neigh, neigh_ptr in self:
            if local_idx == iatom:
                return num_neigh, neigh_ptr
        return -1, None
| gpl-2.0 |
Darredevil/RIOT | dist/tools/pyterm/testbeds/testbeds.py | 100 | 7138 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Philipp Rosenkranz <philipp.rosenkranz@fu-berlin.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os, re, datetime
from subprocess import call, Popen, PIPE
class Testbed():
    """Abstract base class for testbed drivers.

    Subclasses implement flashing, log handling and pyterm start/stop for a
    concrete testbed (local USB nodes, remote DES testbed, desvirt, ...).
    """
    # Directory name (relative to pyterm's working dir) where logs go.
    log_dir_name = 'log'

    def __init__(self):
        pass

    def initCleanWithFlash(self):
        """Stop running terminals, wipe logs, reflash all nodes, restart."""
        self.stop()
        self.cleanLogs()
        self.flashNodes()
        self.start()

    def initClean(self):
        """Wipe logs and (re)start terminals without reflashing."""
        self.cleanLogs()
        self.start()

    # Fixed copy-pasted messages: each abstract method now names itself
    # instead of uniformly claiming "implement flashNodes".
    def flashNodes(self):
        raise NotImplementedError("Inherit from Testbed and implement flashNodes")

    def cleanLogs(self):
        raise NotImplementedError("Inherit from Testbed and implement cleanLogs")

    def archiveLogs(self, experiment=None):
        raise NotImplementedError("Inherit from Testbed and implement archiveLogs")

    def start(self):
        raise NotImplementedError("Inherit from Testbed and implement start")

    def stop(self):
        raise NotImplementedError("Inherit from Testbed and implement stop")

    def defaultArchivePostfix(self, experimentName=None):
        """Return "-<experiment>_<timestamp>" for archive file names."""
        if not experimentName:
            experimentName = "unknown"
        time = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
        postfix = "-" + experimentName + "_" + time
        return postfix

    def printAndCall(self, cmdString):
        """Echo *cmdString* and run it through the shell.

        NOTE: runs with shell=True on an interpolated string; callers must
        only pass trusted, locally-built commands.
        """
        print(cmdString)
        call(cmdString, shell=True)
class DESTestbed(Testbed):
    """Driver for the DES testbed: flashing and pyterm control run on a
    remote host farm via parallel-ssh."""

    def __init__(self, serverHost=None, serverPort=None, userName=None,
                 flasher=None, hexfilePath=None, pyterm=None,
                 logFilePath=None, hostFile=None):
        self.serverHost = serverHost
        # Port is kept as a string for command interpolation.
        self.serverPort = str(serverPort)
        self.userName = userName
        self.flasher = flasher
        self.hexFilePath = hexfilePath
        self.pyterm = pyterm
        self.logFilePath = logFilePath
        self.hostFile = hostFile

    def flashNodes(self):
        cmd = "parallel-ssh -h %s -l %s 'python %s'" % (
            self.hostFile, self.userName, self.flasher)
        self.printAndCall(cmd)

    def cleanLogs(self):
        self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))

    def archiveLogs(self, postfix=None):
        postfix = self.defaultArchivePostfix(postfix)
        archive_dir = self.logFilePath.split("/")[-1]
        cmd = "cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log" % (
            self.logFilePath, postfix, archive_dir)
        self.printAndCall(cmd)

    def start(self):
        cmd = "parallel-ssh -h %s -l %s 'screen -S pyterm -d -m python %s -ln %s'" % (
            self.hostFile, self.userName, self.pyterm, self.log_dir_name)
        self.printAndCall(cmd)

    def stop(self):
        cmd = "parallel-ssh -h %s -l %s 'screen -X -S pyterm quit'" % (
            self.hostFile, self.userName)
        self.printAndCall(cmd)
class LocalTestbed(Testbed):
    """Driver for nodes attached to the local machine via USB serial."""

    def __init__(self, serverHost=None, serverPort=None, flasher=None,
                 hexfilePath=None, pyterm=None, logFilePath=None):
        self.serverHost = serverHost
        self.serverPort = str(serverPort)
        self.flasher = flasher
        self.hexFilePath = hexfilePath
        self.pyterm = pyterm
        self.logFilePath = logFilePath

    def findPorts(self):
        # A node port is any /dev entry whose name begins with "ttyUSB".
        return sorted(dev for dev in os.listdir("/dev/")
                      if dev.startswith("ttyUSB"))

    def flashNodes(self):
        self.printAndCall("python %s %s" % (self.flasher, self.hexFilePath))

    def cleanLogs(self):
        self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))

    def archiveLogs(self, postfix=None):
        postfix = self.defaultArchivePostfix(postfix)
        archive_dir = self.logFilePath.split("/")[-1]
        self.printAndCall("cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log"
                          % (self.logFilePath, postfix, archive_dir))

    def start(self):
        # One detached screen/pyterm session per detected serial port.
        for port in self.findPorts():
            self.printAndCall(
                "screen -S pyterm-%s -d -m python %s -H %s -rn %s -p /dev/%s -ln %s"
                % (port, self.pyterm, port, port, port, self.log_dir_name))

    def stop(self):
        for port in self.findPorts():
            self.printAndCall("screen -X -S pyterm-%s quit" % (port))
class DesVirtTestbed(Testbed):
    """Driver for a desvirt-virtualized network of RIOT native processes.

    NOTE: this class uses Python 2 print statements (the file targets
    python2, see its shebang).
    """
    def __init__(self, serverHost=None, serverPort=None, desvirtPath=None, topologyName=None, pyterm=None, logFilePath=None):
        self.serverHost = serverHost
        self.serverPort = str(serverPort)
        self.desvirtPath = desvirtPath
        self.topologyName = topologyName
        self.pyterm = pyterm
        self.logFilePath = logFilePath
        # (node name, TCP port) tuples; filled by startDesVirtNetwork().
        self.namePortList = []

    def findPorts(self):
        return self.namePortList

    def startDesVirtNetwork(self):
        print "executing: " + "./vnet --start --name " + self.topologyName + " in: " + self.desvirtPath
        call("sh -c \"./vnet --define --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True)
        stream = Popen("sh -c \"./vnet --start --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True, stderr=PIPE).stderr
        # vnet prints one stderr line per spawned RIOT native process; pull
        # the node name and its telnet port out of each matching line.
        pats = r'.*riotnative.*\.elf (\S+) -t (\S+)'
        pattern = re.compile(pats)
        for line in stream:
            match = pattern.match(line)
            if(match):
                tuple = match.groups()
                self.namePortList.append((tuple[0], int(tuple[1])))
        self.namePortList = sorted(self.namePortList)
        for tuple in self.namePortList:
            print "name: " + tuple[0] + " port: " + str(tuple[1])

    def stopDesVirtNetwork(self):
        call("sh -c \"./vnet --stop --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True)

    def flashNodes(self):
        # Virtual nodes need no flashing.
        pass

    def cleanLogs(self):
        self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))

    def archiveLogs(self, postfix=None):
        postfix = self.defaultArchivePostfix(postfix)
        logDir = self.logFilePath.split("/")[-1]
        self.printAndCall("cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log" % (self.logFilePath, postfix, logDir))

    def start(self):
        # One detached screen/pyterm session per virtual node, connecting
        # over TCP (-ts) instead of a serial device.
        for node in self.namePortList:
            self.printAndCall("screen -S pyterm-%s -d -m python %s -H %s -rn %s -ts %s -ln %s" % (node[0], self.pyterm, node[0], node[0], node[1], self.log_dir_name))

    def stop(self):
        print "stop called"
        for node in self.namePortList:
            self.printAndCall("screen -X -S pyterm-%s quit" % (node[0]))
        self.stopDesVirtNetwork()
| lgpl-2.1 |
aimas/TuniErp-8.0 | addons/hr_attendance/wizard/__init__.py | 375 | 1073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance_error
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jtopjian/st2 | st2common/st2common/util/casts.py | 7 | 1854 | # -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import six
from st2common.util.compat import to_unicode
def _cast_object(x):
    """
    Cast a string to an object (dict) or array.

    Note: String can be either serialized as JSON or a raw Python output
    (e.g. the repr() of a dict/list).
    """
    if isinstance(x, six.string_types):
        try:
            return json.loads(x)
        except ValueError:
            # Not valid JSON (json.loads raises ValueError) -- fall back to
            # parsing the string as a Python literal.  Previously a bare
            # ``except:`` swallowed *every* exception here, including
            # KeyboardInterrupt/SystemExit.
            return ast.literal_eval(x)
    else:
        # Already an object/array -- pass through unchanged.
        return x
def _cast_boolean(x):
    """Cast strings such as 'true'/'false' to bool; pass non-strings through."""
    if not isinstance(x, six.string_types):
        return x
    # capitalize() turns 'true' / 'FALSE' into the Python literals
    # 'True' / 'False' which literal_eval can parse.
    return ast.literal_eval(x.capitalize())
# Mapping from JSON-schema type names to the callables used to coerce a
# raw value into that type.
CASTS = {
    'array': _cast_object,
    'boolean': _cast_boolean,
    'integer': int,
    'number': float,
    'object': _cast_object,
    'string': to_unicode
}


def get_cast(cast_type):
    """
    Determines the callable which will perform the cast given a string
    representation of the type.

    :param cast_type: Type of the cast to perform.
    :type cast_type: ``str``

    :rtype: ``callable`` or ``None`` for unknown types
    """
    return CASTS.get(cast_type)
| apache-2.0 |
phalax4/CarnotKE | jyhton/lib-python/2.7/unittest/main.py | 115 | 9083 | """Unittest main program"""
import sys
import os
import types
from . import loader, runner
from .signals import installHandler
# Marker used by unittest's traceback pruning to hide frames from this
# module in failure reports.
__unittest = True

# Help-text fragments for options that subclasses/instances may disable;
# usageExit() only interpolates the ones that are active.
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"

# Usage text shown when unittest is run as ``python -m unittest``.
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""

# Usage text shown when a test module invokes unittest.main() itself.
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
    for making test modules conveniently executable.

    NOTE: Python 2 code (``print`` statements, ``basestring``,
    ``except E, v`` syntax, ``types.ClassType``).
    """
    USAGE = USAGE_FROM_MODULE

    # defaults for testing
    failfast = catchbreak = buffer = progName = None

    def __init__(self, module='__main__', defaultTest=None, argv=None,
                 testRunner=None, testLoader=loader.defaultTestLoader,
                 exit=True, verbosity=1, failfast=None, catchbreak=None,
                 buffer=None):
        # ``module`` may be a dotted name; resolve it to the module object.
        if isinstance(module, basestring):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv
        self.exit = exit
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.verbosity = verbosity
        self.buffer = buffer
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        # Parsing also loads the tests; the program then runs immediately
        # (constructing TestProgram executes the test run).
        self.parseArgs(argv)
        self.runTests()

    def usageExit(self, msg=None):
        # Print optional error message plus usage text, then exit(2).
        if msg:
            print msg
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        # Only advertise options that have not been explicitly disabled
        # (attribute forced to False).
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print self.USAGE % usage
        sys.exit(2)

    def parseArgs(self, argv):
        # ``python -m unittest discover ...`` takes a separate option path.
        if len(argv) > 1 and argv[1].lower() == 'discover':
            self._do_discovery(argv[2:])
            return

        import getopt
        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
        try:
            options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
            for opt, value in options:
                if opt in ('-h','-H','--help'):
                    self.usageExit()
                if opt in ('-q','--quiet'):
                    self.verbosity = 0
                if opt in ('-v','--verbose'):
                    self.verbosity = 2
                if opt in ('-f','--failfast'):
                    # Command line only applies when not set via constructor.
                    if self.failfast is None:
                        self.failfast = True
                    # Should this raise an exception if -f is not valid?
                if opt in ('-c','--catch'):
                    if self.catchbreak is None:
                        self.catchbreak = True
                    # Should this raise an exception if -c is not valid?
                if opt in ('-b','--buffer'):
                    if self.buffer is None:
                        self.buffer = True
                    # Should this raise an exception if -b is not valid?
            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
                if __name__ == '__main__':
                    # to support python -m unittest ...
                    self.module = None
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error, msg:
            self.usageExit(msg)

    def createTests(self):
        # Either load everything from the module, or only the named tests.
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)

    def _do_discovery(self, argv, Loader=None):
        # ``Loader`` is injectable for tests; defaults to self.testLoader.
        if Loader is None:
            Loader = lambda: self.testLoader

        # handle command line args for test discovery
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Positional args override the corresponding options in order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)

        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer

        if options.verbose:
            self.verbosity = 2

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)

    def runTests(self):
        if self.catchbreak:
            installHandler()
        if self.testRunner is None:
            self.testRunner = runner.TextTestRunner
        if isinstance(self.testRunner, (type, types.ClassType)):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())

# Historical alias: ``unittest.main`` is this class.
main = TestProgram
| apache-2.0 |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/logging/test_fixture.py | 30 | 3581 | # -*- coding: utf-8 -*-
import logging
import pytest
logger = logging.getLogger(__name__)
sublogger = logging.getLogger(__name__ + ".baz")
def test_fixture_help(testdir):
    # The caplog fixture must be advertised by ``pytest --fixtures``.
    result = testdir.runpytest("--fixtures")
    result.stdout.fnmatch_lines(["*caplog*"])


def test_change_level(caplog):
    # Raising the capture level to INFO drops DEBUG records.
    caplog.set_level(logging.INFO)
    logger.debug("handler DEBUG level")
    logger.info("handler INFO level")

    # Per-logger override: only CRITICAL passes for the sublogger.
    caplog.set_level(logging.CRITICAL, logger=sublogger.name)
    sublogger.warning("logger WARNING level")
    sublogger.critical("logger CRITICAL level")

    assert "DEBUG" not in caplog.text
    assert "INFO" in caplog.text
    assert "WARNING" not in caplog.text
    assert "CRITICAL" in caplog.text
def test_change_level_undo(testdir):
    """Ensure that 'set_level' is undone after the end of the test"""
    testdir.makepyfile(
        """
        import logging

        def test1(caplog):
            caplog.set_level(logging.INFO)
            # using + operator here so fnmatch_lines doesn't match the code in the traceback
            logging.info('log from ' + 'test1')
            assert 0

        def test2(caplog):
            # using + operator here so fnmatch_lines doesn't match the code in the traceback
            logging.info('log from ' + 'test2')
            assert 0
        """
    )
    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
    # test2 must not inherit the INFO level set inside test1.
    assert "log from test2" not in result.stdout.str()
def test_with_statement(caplog):
    # at_level is the context-manager counterpart of set_level.
    with caplog.at_level(logging.INFO):
        logger.debug("handler DEBUG level")
        logger.info("handler INFO level")

        with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
            sublogger.warning("logger WARNING level")
            sublogger.critical("logger CRITICAL level")

    assert "DEBUG" not in caplog.text
    assert "INFO" in caplog.text
    assert "WARNING" not in caplog.text
    assert "CRITICAL" in caplog.text


def test_log_access(caplog):
    caplog.set_level(logging.INFO)
    logger.info("boo %s", "arg")
    # Records keep the unformatted msg; caplog.text holds rendered output.
    assert caplog.records[0].levelname == "INFO"
    assert caplog.records[0].msg == "boo %s"
    assert "boo arg" in caplog.text
def test_record_tuples(caplog):
    caplog.set_level(logging.INFO)
    logger.info("boo %s", "arg")

    # record_tuples yields (logger name, level, formatted message) triples.
    assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")]


def test_unicode(caplog):
    # Non-ASCII log messages must survive capture unchanged.
    caplog.set_level(logging.INFO)
    logger.info(u"bū")
    assert caplog.records[0].levelname == "INFO"
    assert caplog.records[0].msg == u"bū"
    assert u"bū" in caplog.text


def test_clear(caplog):
    caplog.set_level(logging.INFO)
    logger.info(u"bū")
    assert len(caplog.records)
    assert caplog.text
    caplog.clear()
    # clear() empties both the record list and the captured text.
    assert not len(caplog.records)
    assert not caplog.text
@pytest.fixture
def logging_during_setup_and_teardown(caplog):
    # Emits one INFO record during fixture setup and one during teardown so
    # the test below can inspect per-phase capture.
    caplog.set_level("INFO")
    logger.info("a_setup_log")
    yield
    logger.info("a_teardown_log")
    # At teardown time only the teardown-phase records are visible.
    assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"]


def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
    assert not caplog.records
    assert not caplog.get_records("call")
    logger.info("a_call_log")
    assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]

    assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]

    # This reaches into private API; don't use this type of thing in real tests!
    assert set(caplog._item.catch_log_handlers.keys()) == {"setup", "call"}
| mpl-2.0 |
realms-team/basestation-fw | libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/service_info.py | 3 | 7269 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ServiceInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ServiceInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'allocated_pk_period': 'int',
'estimated_hops': 'int',
'estimated_latency': 'int',
'is_sink': 'bool',
'peer_mac_address': 'str',
'requested_pk_period': 'int'
}
self.attribute_map = {
'allocated_pk_period': 'allocatedPkPeriod',
'estimated_hops': 'estimatedHops',
'estimated_latency': 'estimatedLatency',
'is_sink': 'isSink',
'peer_mac_address': 'peerMacAddress',
'requested_pk_period': 'requestedPkPeriod'
}
self._allocated_pk_period = None
self._estimated_hops = None
self._estimated_latency = None
self._is_sink = None
self._peer_mac_address = None
self._requested_pk_period = None
@property
def allocated_pk_period(self):
"""
Gets the allocated_pk_period of this ServiceInfo.
Allocated inter-packet period for this service
:return: The allocated_pk_period of this ServiceInfo.
:rtype: int
"""
return self._allocated_pk_period
@allocated_pk_period.setter
def allocated_pk_period(self, allocated_pk_period):
"""
Sets the allocated_pk_period of this ServiceInfo.
Allocated inter-packet period for this service
:param allocated_pk_period: The allocated_pk_period of this ServiceInfo.
:type: int
"""
self._allocated_pk_period = allocated_pk_period
@property
def estimated_hops(self):
"""
Gets the estimated_hops of this ServiceInfo.
Estimated number of hops
:return: The estimated_hops of this ServiceInfo.
:rtype: int
"""
return self._estimated_hops
@estimated_hops.setter
def estimated_hops(self, estimated_hops):
"""
Sets the estimated_hops of this ServiceInfo.
Estimated number of hops
:param estimated_hops: The estimated_hops of this ServiceInfo.
:type: int
"""
self._estimated_hops = estimated_hops
@property
def estimated_latency(self):
"""
Gets the estimated_latency of this ServiceInfo.
Estimated latency, in milliseconds
:return: The estimated_latency of this ServiceInfo.
:rtype: int
"""
return self._estimated_latency
@estimated_latency.setter
def estimated_latency(self, estimated_latency):
"""
Sets the estimated_latency of this ServiceInfo.
Estimated latency, in milliseconds
:param estimated_latency: The estimated_latency of this ServiceInfo.
:type: int
"""
self._estimated_latency = estimated_latency
@property
def is_sink(self):
    """Whether the queried mote terminates the traffic (True) or
    originates it (False).

    :return: The is_sink of this ServiceInfo.
    :rtype: bool
    """
    return self._is_sink

@is_sink.setter
def is_sink(self, is_sink):
    """Record whether the queried mote is the traffic sink.

    :param is_sink: The is_sink of this ServiceInfo.
    :type: bool
    """
    self._is_sink = is_sink
@property
def peer_mac_address(self):
    """MAC address of the device on the other side of the service.
    The manager is designated as 00-00-00-00-00-00-00-00.

    :return: The peer_mac_address of this ServiceInfo.
    :rtype: str
    """
    return self._peer_mac_address

@peer_mac_address.setter
def peer_mac_address(self, peer_mac_address):
    """Record the peer device's MAC address.

    :param peer_mac_address: The peer_mac_address of this ServiceInfo.
    :type: str
    """
    self._peer_mac_address = peer_mac_address
@property
def requested_pk_period(self):
    """Inter-packet period the mote asked for (may differ from the
    allocated one).

    :return: The requested_pk_period of this ServiceInfo.
    :rtype: int
    """
    return self._requested_pk_period

@requested_pk_period.setter
def requested_pk_period(self, requested_pk_period):
    """Record the inter-packet period requested for this service.

    :param requested_pk_period: The requested_pk_period of this ServiceInfo.
    :type: int
    """
    self._requested_pk_period = requested_pk_period
def to_dict(self):
    """
    Returns the model properties as a dict
    """
    result = {}
    # iteritems: py2/3 compat helper imported at module top (presumably
    # six.iteritems -- not visible in this chunk; confirm against imports).
    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Recursively serialize nested model objects inside lists.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            # Nested model object: serialize it recursively.
            result[attr] = value.to_dict()
        else:
            # Plain value (str/int/bool/None): store as-is.
            result[attr] = value
    return result
def to_str(self):
    """
    Returns the string representation of the model
    """
    # pformat renders the dict form in a stable, readable layout.
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`
    """
    # Delegates to to_str() so repr and str stay consistent.
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are equal
    """
    # Bug fix: the original unconditionally accessed ``other.__dict__``,
    # which raises AttributeError when compared against objects without
    # one (e.g. ``info == 5``).  Unrelated types now simply compare
    # unequal, matching later swagger-codegen model templates.
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal
    """
    # Defined in terms of == so the two operators stay consistent
    # (required on Python 2, which does not derive != from ==).
    return not self == other
| bsd-3-clause |
FirstDraftGIS/firstdraft | projfd/apifd/serializers.py | 1 | 3304 | from appfd.models import Basemap, Feature, Order, Place, Test
from drf_queryfields import QueryFieldsMixin
from rest_framework.serializers import HiddenField, IntegerField, NullBooleanField, CharField, ChoiceField, URLField
from rest_framework.serializers import HyperlinkedModelSerializer, ModelSerializer, Serializer, SerializerMethodField
####
from rest_framework.utils.serializer_helpers import (
BindingDict, BoundField, JSONBoundField, NestedBoundField, ReturnDict,
ReturnList
)
class MapRequestSerializer(Serializer):
    """Validates the payload of an incoming map-generation request.

    Every field is optional here; the view decides which combination of
    ``text``/``url`` plus rendering options is acceptable.
    """
    basemap = CharField(max_length=200, allow_blank=True, allow_null=True, required=False)
    case_insensitive = NullBooleanField(required=False)
    end_user_timezone = CharField(max_length=200, allow_null=True, required=False)
    # "all" requests every output format at once.
    map_format = ChoiceField(["all","geojson", "gif", "jpg", "png", "xy"], required=False)
    # NOTE(review): max_length=1e10 is a float literal; DRF expects an int -- confirm.
    text = CharField(max_length=1e10, trim_whitespace=True, allow_null=True, required=False)
    url = URLField(allow_null=True, required=False)
# Serializers define the API representation.
class BasemapSerializer(QueryFieldsMixin, ModelSerializer):
    """Basemap representation; QueryFieldsMixin lets clients select
    fields via query parameters."""
    class Meta:
        model = Basemap
        fields = ["id", "name"]
class FeatureSerializer(QueryFieldsMixin, HyperlinkedModelSerializer):
    """Feature representation with hyperlinked relations; fields are
    client-selectable via query parameters."""
    class Meta:
        model = Feature
        fields = ["name", "order"]
class OrderSerializer(ModelSerializer):
    """Order status/timing representation (no field selection)."""
    class Meta:
        model = Order
        fields = ["complete", "duration", "end", "start", "token"]
class QueryableOrderSerializer(QueryFieldsMixin, OrderSerializer):
    """Same fields as OrderSerializer, but client-selectable via query
    parameters through QueryFieldsMixin."""
    class Meta:
        model = Order
        fields = ["complete", "duration", "end", "start", "token"]
class PlaceSerializer(QueryFieldsMixin, ModelSerializer):
    """Compact place representation (id, attribution, country, name, point).

    The feature_type derivation below is kept for reference but is
    currently disabled:

    feature_type = SerializerMethodField()
    def get_feature_type(self, place):
        lookup = {
            "FRM": "Farm",
            "PCLI": "Independent Political Entity",
            "PPL": "Populated Place",
            "PPLA": "Admin 1",
            "PPLA2": "Admin 2",
            "PPLA3": "Admin 3",
            "PPLA4": "Admin 4",
            "PPLL": "Populated Locality",
            "ST": "Street"
        }
        return lookup.get(place.feature_code, place.feature_code)
    """
    class Meta:
        model = Place
        fields = ["id", "attribution", "country_code", "name", "point"]
class VerbosePlaceSerializer(PlaceSerializer):
    """Full place representation: every identifier, admin code, bounding
    box edge and statistic the Place model carries."""
    class Meta:
        model = Place
        # NOTE(review): "name" is listed twice below -- harmless but
        # likely unintended; confirm before removing.
        fields = [
            "id", "name",
            "attribution", "enwiki_title", "geonames_id", "osm_id",
            "pcode", "fips",
            "admin1_code", "admin2_code", "admin3_code", "admin4_code", "admin_level",
            "east", "north", "south", "west",
            "name", "name_ascii", "name_display", "name_en", "name_normalized", "other_names",
            "geonames_feature_class", "geonames_feature_code", "place_type",
            "latitude", "longitude", "area_sqkm",
            "importance", "osmname_class", "osmname_type", "osm_type", "place_rank",
            "dem", "elevation",
            "city", "county", "country", "country_code", "state", "street",
            "population", "popularity", "timezone"
        ]
class TestSerializer(QueryFieldsMixin, ModelSerializer):
    """Test-run representation; fields are client-selectable via query
    parameters."""
    class Meta:
        model = Test
        fields = ["accuracy", "created"]
| apache-2.0 |
yoer/hue | desktop/core/ext-py/lxml/src/lxml/html/diff.py | 41 | 30375 | import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import cgi
import re
__all__ = ['html_annotate', 'htmldiff']
# Py2/Py3 compatibility shims: ``_unicode`` is the text type and
# ``basestring`` the common string base class on both major versions.
try:
    _unicode = unicode
except NameError:
    # Python 3
    _unicode = str
try:
    basestring = __builtins__["basestring"]
except (KeyError, NameError):
    # Python 3
    basestring = str
############################################################
## Annotation
############################################################
def default_markup(text, version):
    """Wrap *text* in a <span> whose title attribute names *version*.

    *text* is assumed to already be valid HTML; only the version string
    is escaped, since it lands inside a double-quoted attribute value.
    """
    # html.escape replaces the deprecated cgi.escape (removed in Python
    # 3.13); quote=True matches cgi.escape(..., 1) except that it also
    # escapes "'", which is still valid inside a double-quoted attribute.
    import html
    return '<span title="%s">%s</span>' % (
        html.escape(_unicode(version), quote=True), text)
def html_annotate(doclist, markup=default_markup):
    """
    doclist should be ordered from oldest to newest, like::

        >>> version1 = 'Hello World'
        >>> version2 = 'Goodbye World'
        >>> print(html_annotate([(version1, 'version 1'),
        ...                      (version2, 'version 2')]))
        <span title="version 2">Goodbye</span> <span title="version 1">World</span>

    The documents must be *fragments* (str/UTF8 or unicode), not
    complete documents

    The markup argument is a function to markup the spans of words.
    This function is called like markup('Hello', 'version 2'), and
    returns HTML.  The first argument is text and never includes any
    markup.  The default uses a span with a title:

        >>> print(default_markup('Some Text', 'by Joe'))
        <span title="by Joe">Some Text</span>
    """
    # The basic strategy we have is to split the documents up into
    # logical tokens (which are words with attached markup).  We then
    # do diffs of each of the versions to track when a token first
    # appeared in the document; the annotation attached to the token
    # is the version where it first appeared.
    tokenlist = [tokenize_annotated(doc, version)
                 for doc, version in doclist]
    # Each pass merges annotations from the previous version into the
    # next, so surviving tokens keep their oldest annotation.
    cur_tokens = tokenlist[0]
    for tokens in tokenlist[1:]:
        html_annotate_merge_annotations(cur_tokens, tokens)
        cur_tokens = tokens
    # After we've tracked all the tokens, we can combine spans of text
    # that are adjacent and have the same annotation
    cur_tokens = compress_tokens(cur_tokens)
    # And finally add markup
    result = markup_serialize_tokens(cur_tokens, markup)
    return ''.join(result).strip()
def tokenize_annotated(doc, annotation):
    """Tokenize *doc* and stamp every resulting token with *annotation*."""
    annotated = tokenize(doc, include_hrefs=False)
    for tok in annotated:
        tok.annotation = annotation
    return annotated
def html_annotate_merge_annotations(tokens_old, tokens_new):
    """Merge the annotations from tokens_old into tokens_new, when the
    tokens in the new document already existed in the old document.
    """
    s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
    commands = s.get_opcodes()

    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            # Token ranges that already existed keep their older annotation.
            eq_old = tokens_old[i1:i2]
            eq_new = tokens_new[j1:j2]
            copy_annotations(eq_old, eq_new)
def copy_annotations(src, dest):
    """Propagate the ``annotation`` attribute pairwise from the tokens in
    *src* onto the tokens in *dest* (both sequences must match in length)."""
    assert len(src) == len(dest)
    for source_token, target_token in zip(src, dest):
        target_token.annotation = source_token.annotation
def compress_tokens(tokens):
    """
    Combine adjacent tokens when there is no HTML between the tokens,
    and they share an annotation
    """
    result = [tokens[0]]
    for tok in tokens[1:]:
        if (not result[-1].post_tags and
                not tok.pre_tags and
                result[-1].annotation == tok.annotation):
            # No intervening markup and same annotation: merge the words.
            compress_merge_back(result, tok)
        else:
            result.append(tok)
    return result
def compress_merge_back(tokens, tok):
    """ Merge tok into the last element of tokens (modifying the list of
    tokens in-place).  """
    last = tokens[-1]
    # Only plain word tokens can be merged; tag_token/href_token
    # subclasses keep their own identity.
    if type(last) is not token or type(tok) is not token:
        tokens.append(tok)
    else:
        text = _unicode(last)
        if last.trailing_whitespace:
            text += ' '
        text += tok
        merged = token(text,
                       pre_tags=last.pre_tags,
                       post_tags=tok.post_tags,
                       trailing_whitespace=tok.trailing_whitespace)
        # The merged word keeps the earlier token's annotation.
        merged.annotation = last.annotation
        tokens[-1] = merged
def markup_serialize_tokens(tokens, markup_func):
    """Serialize *tokens* into text chunks, passing each word through
    *markup_func* (called as markup_func(text, annotation)) while emitting
    its attached opening and closing tags verbatim."""
    for tok in tokens:
        for opening in tok.pre_tags:
            yield opening
        marked = markup_func(tok.html(), tok.annotation)
        if tok.trailing_whitespace:
            marked += ' '
        yield marked
        for closing in tok.post_tags:
            yield closing
############################################################
## HTML Diffs
############################################################
def htmldiff(old_html, new_html):
    ## FIXME: this should take parsed documents too, and use their body
    ## or other content.
    """ Do a diff of the old and new document.  The documents are HTML
    *fragments* (str/UTF8 or unicode), they are not complete documents
    (i.e., no <html> tag).

    Returns HTML with <ins> and <del> tags added around the
    appropriate text.

    Markup is generally ignored, with the markup from new_html
    preserved, and possibly some markup from old_html (though it is
    considered acceptable to lose some of the old markup).  Only the
    words in the HTML are diffed.  The exception is <img> tags, which
    are treated like words, and the href attribute of <a> tags, which
    are noted inside the tag itself when there are changes.
    """
    old_html_tokens = tokenize(old_html)
    new_html_tokens = tokenize(new_html)
    result = htmldiff_tokens(old_html_tokens, new_html_tokens)
    result = ''.join(result).strip()
    # Re-balance the inserted <ins>/<del> markup against block elements.
    return fixup_ins_del_tags(result)
def htmldiff_tokens(html1_tokens, html2_tokens):
    """ Does a diff on the tokens themselves, returning a list of text
    chunks (not tokens).
    """
    # There are several passes as we do the differences.  The tokens
    # isolate the portion of the content we care to diff; difflib does
    # all the actual hard work at that point.
    #
    # Then we must create a valid document from pieces of both the old
    # document and the new document.  We generally prefer to take
    # markup from the new document, and only do a best effort attempt
    # to keep markup from the old document; anything that we can't
    # resolve we throw away.  Also we try to put the deletes as close
    # to the location where we think they would have been -- because
    # we are only keeping the markup from the new document, it can be
    # fuzzy where in the new document the old text would have gone.
    # Again we just do a best effort attempt.
    s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
    commands = s.get_opcodes()
    result = []
    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
            continue
        # A 'replace' opcode is handled as an insert plus a delete.
        if command == 'insert' or command == 'replace':
            ins_tokens = expand_tokens(html2_tokens[j1:j2])
            merge_insert(ins_tokens, result)
        if command == 'delete' or command == 'replace':
            del_tokens = expand_tokens(html1_tokens[i1:i2])
            merge_delete(del_tokens, result)
    # If deletes were inserted directly as <del> then we'd have an
    # invalid document at this point.  Instead we put in special
    # markers, and when the complete diffed document has been created
    # we try to move the deletes around and resolve any problems.
    result = cleanup_delete(result)
    return result
def expand_tokens(tokens, equal=False):
    """Yield the raw text chunks (pre tags, word, post tags) for each token.

    When *equal* is true, tokens flagged ``hide_when_equal`` contribute
    their surrounding tags but suppress their own text.
    """
    for tok in tokens:
        for opening in tok.pre_tags:
            yield opening
        if not (equal and tok.hide_when_equal):
            body = tok.html()
            yield body + ' ' if tok.trailing_whitespace else body
        for closing in tok.post_tags:
            yield closing
def merge_insert(ins_chunks, doc):
    """ doc is the already-handled document (as a list of text chunks);
    here we add <ins>ins_chunks</ins> to the end of that.  """
    # Though we don't throw away unbalanced_start or unbalanced_end
    # (we assume there is accompanying markup later or earlier in the
    # document), we only put <ins> around the balanced portion.
    unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
    doc.extend(unbalanced_start)
    if doc and not doc[-1].endswith(' '):
        # Fix up the case where the word before the insert didn't end with
        # a space
        doc[-1] += ' '
    doc.append('<ins>')
    if balanced and balanced[-1].endswith(' '):
        # We move space outside of </ins>
        balanced[-1] = balanced[-1][:-1]
    doc.extend(balanced)
    doc.append('</ins> ')
    # Close tags with no matching open tag go after the </ins>.
    doc.extend(unbalanced_end)
# These are sentinals to represent the start and end of a <del>
# segment, until we do the cleanup phase to turn them into proper
# markup:
class DEL_START:
    """Sentinel marking the beginning of a pending <del> region."""
    pass

class DEL_END:
    """Sentinel marking the end of a pending <del> region."""
    pass

class NoDeletes(Exception):
    """ Raised when the document no longer contains any pending deletes
    (DEL_START/DEL_END) """
def merge_delete(del_chunks, doc):
    """Append *del_chunks* to the document *doc* (a list of text chunks),
    bracketed by DEL_START/DEL_END markers; cleanup_delete later resolves
    these markers into real <del> tags."""
    doc.append(DEL_START)
    for chunk in del_chunks:
        doc.append(chunk)
    doc.append(DEL_END)
def cleanup_delete(chunks):
    """ Cleans up any DEL_START/DEL_END markers in the document, replacing
    them with <del></del>.  To do this while keeping the document
    valid, it may need to drop some tags (either start or end tags).

    It may also move the del into adjacent tags to try to move it to a
    similar location where it was originally located (e.g., moving a
    delete into preceding <div> tag, if the del looks like (DEL_START,
    'Text</div>', DEL_END)"""
    # Each pass of this loop resolves exactly one pending delete region;
    # we keep going until none remain.
    while 1:
        # Find a pending DEL_START/DEL_END, splitting the document
        # into stuff-preceding-DEL_START, stuff-inside, and
        # stuff-following-DEL_END
        try:
            pre_delete, delete, post_delete = split_delete(chunks)
        except NoDeletes:
            # Nothing found, we've cleaned up the entire doc
            break
        # The stuff-inside-DEL_START/END may not be well balanced
        # markup.  First we figure out what unbalanced portions there are:
        unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
        # Then we move the span forward and/or backward based on these
        # unbalanced portions:
        locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
        locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
        doc = pre_delete
        if doc and not doc[-1].endswith(' '):
            # Fix up case where the word before us didn't have a trailing space
            doc[-1] += ' '
        doc.append('<del>')
        if balanced and balanced[-1].endswith(' '):
            # We move space outside of </del>
            balanced[-1] = balanced[-1][:-1]
        doc.extend(balanced)
        doc.append('</del> ')
        doc.extend(post_delete)
        chunks = doc
    return chunks
def split_unbalanced(chunks):
    """Return (unbalanced_start, balanced, unbalanced_end), where each is
    a list of text and tag chunks.

    unbalanced_start is a list of all the tags that are opened, but
    not closed in this span.  Similarly, unbalanced_end is a list of
    tags that are closed but were not opened.  Extracting these might
    mean some reordering of the chunks."""
    start = []
    end = []
    tag_stack = []
    balanced = []
    for chunk in chunks:
        if not chunk.startswith('<'):
            balanced.append(chunk)
            continue
        endtag = chunk[1] == '/'
        name = chunk.split()[0].strip('<>/')
        if name in empty_tags:
            # Void elements (<br>, <img>, ...) never unbalance anything.
            balanced.append(chunk)
            continue
        if endtag:
            if tag_stack and tag_stack[-1][0] == name:
                # Proper close of the innermost open tag: fill in the
                # placeholder reserved when the open tag was seen.
                balanced.append(chunk)
                name, pos, tag = tag_stack.pop()
                balanced[pos] = tag
            elif tag_stack:
                # Close tag that doesn't match the innermost open tag:
                # everything still open becomes unbalanced-start.
                start.extend([tag for name, pos, tag in tag_stack])
                tag_stack = []
                end.append(chunk)
            else:
                end.append(chunk)
        else:
            # Open tag: reserve a slot in ``balanced`` that is only kept
            # if a matching close tag turns up later.
            tag_stack.append((name, len(balanced), chunk))
            balanced.append(None)
    start.extend(
        [chunk for name, pos, chunk in tag_stack])
    balanced = [chunk for chunk in balanced if chunk is not None]
    return start, balanced, end
def split_delete(chunks):
    """ Returns (stuff_before_DEL_START, stuff_inside_DEL_START_END,
    stuff_after_DEL_END).  Returns the first case found (there may be
    more DEL_STARTs in stuff_after_DEL_END).  Raises NoDeletes if
    there's no DEL_START found. """
    try:
        pos = chunks.index(DEL_START)
    except ValueError:
        raise NoDeletes
    # merge_delete always appends DEL_END together with DEL_START, so
    # this index() is expected to succeed.
    pos2 = chunks.index(DEL_END)
    return chunks[:pos], chunks[pos+1:pos2], chunks[pos2+1:]
def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
    """ pre_delete and post_delete implicitly point to a place in the
    document (where the two were split).  This moves that point (by
    popping items from one and pushing them onto the other).  It moves
    the point to try to find a place where unbalanced_start applies.

    As an example::

        >>> unbalanced_start = ['<div>']
        >>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
        >>> pre, post = doc[:3], doc[3:]
        >>> pre, post
        (['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
        >>> locate_unbalanced_start(unbalanced_start, pre, post)
        >>> pre, post
        (['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])

    As you can see, we moved the point so that the dangling <div> that
    we found will be effectively replaced by the div in the original
    document.  If this doesn't work out, we just throw away
    unbalanced_start without doing anything.
    """
    while 1:
        if not unbalanced_start:
            # We have totally succeded in finding the position
            break
        finding = unbalanced_start[0]
        finding_name = finding.split()[0].strip('<>')
        if not post_delete:
            break
        next = post_delete[0]
        if next is DEL_START or not next.startswith('<'):
            # Reached a word, we can't move the delete text forward
            break
        if next[1] == '/':
            # Reached a closing tag, can we go further?  Maybe not...
            break
        name = next.split()[0].strip('<>')
        if name == 'ins':
            # Can't move into an insert
            break
        assert name != 'del', (
            "Unexpected delete tag: %r" % next)
        if name == finding_name:
            # Matching open tag just past the delete point: absorb it
            # into the pre-delete side and drop our dangling copy.
            unbalanced_start.pop(0)
            pre_delete.append(post_delete.pop(0))
        else:
            # Found a tag that doesn't match
            break
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
    """ like locate_unbalanced_start, except handling end tags and
    possibly moving the point earlier in the document.  """
    while 1:
        if not unbalanced_end:
            # Success
            break
        finding = unbalanced_end[-1]
        finding_name = finding.split()[0].strip('<>/')
        if not pre_delete:
            break
        next = pre_delete[-1]
        if next is DEL_END or not next.startswith('</'):
            # A word or a start tag
            break
        name = next.split()[0].strip('<>/')
        if name == 'ins' or name == 'del':
            # Can't move into an insert or delete
            break
        if name == finding_name:
            # Matching close tag just before the delete point: absorb it
            # into the post-delete side and drop our dangling copy.
            unbalanced_end.pop()
            post_delete.insert(0, pre_delete.pop())
        else:
            # Found a tag that doesn't match
            break
class token(_unicode):
    """ Represents a diffable token, generally a word that is displayed to
    the user.  Opening tags are attached to this token when they are
    adjacent (pre_tags) and closing tags that follow the word
    (post_tags).  Some exceptions occur when there are empty tags
    adjacent to a word, so there may be close tags in pre_tags, or
    open tags in post_tags.

    We also keep track of whether the word was originally followed by
    whitespace, even though we do not want to treat the word as
    equivalent to a similar word that does not have a trailing
    space."""

    # When this is true, the token will be eliminated from the
    # displayed diff if no change has occurred:
    hide_when_equal = False

    def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=False):
        # Subclassing the text type itself keeps tokens directly
        # comparable by difflib while still carrying tag metadata.
        obj = _unicode.__new__(cls, text)

        if pre_tags is not None:
            obj.pre_tags = pre_tags
        else:
            obj.pre_tags = []

        if post_tags is not None:
            obj.post_tags = post_tags
        else:
            obj.post_tags = []

        obj.trailing_whitespace = trailing_whitespace
        return obj

    def __repr__(self):
        return 'token(%s, %r, %r)' % (_unicode.__repr__(self), self.pre_tags, self.post_tags)

    def html(self):
        # Plain words render as themselves; subclasses override this.
        return _unicode(self)
class tag_token(token):

    """ Represents a token that is actually a tag.  Currently this is just
    the <img> tag, which takes up visible space just like a word but
    is only represented in a document by a tag.  """

    def __new__(cls, tag, data, html_repr, pre_tags=None,
                post_tags=None, trailing_whitespace=False):
        # Bug fix: the comparison text previously interpolated the
        # *builtin* ``type`` instead of the ``tag`` argument, producing
        # strings like "<class 'type'>: src" rather than "img: src".
        obj = token.__new__(cls, "%s: %s" % (tag, data),
                            pre_tags=pre_tags,
                            post_tags=post_tags,
                            trailing_whitespace=trailing_whitespace)
        obj.tag = tag
        obj.data = data
        obj.html_repr = html_repr
        return obj

    def __repr__(self):
        return 'tag_token(%s, %s, html_repr=%s, post_tags=%r, pre_tags=%r, trailing_whitespace=%s)' % (
            self.tag,
            self.data,
            self.html_repr,
            self.pre_tags,
            self.post_tags,
            self.trailing_whitespace)

    def html(self):
        # A tag token renders as its original markup, not its text form.
        return self.html_repr
class href_token(token):
    """ Represents the href in an anchor tag.  Unlike other words, we only
    show the href when it changes.  """

    hide_when_equal = True

    def html(self):
        # Rendered with a leading space so it separates from the anchor text.
        return ' Link: %s' % self
def tokenize(html, include_hrefs=True):
    """
    Parse the given HTML and returns token objects (words with attached tags).

    This parses only the content of a page; anything in the head is
    ignored, and the <head> and <body> elements are themselves
    optional.  The content is then parsed by lxml, which ensures the
    validity of the resulting parsed document (though lxml may make
    incorrect guesses when the markup is particular bad).

    <ins> and <del> tags are also eliminated from the document, as
    that gets confusing.

    If include_hrefs is true, then the href attribute of <a> tags is
    included as a special kind of diffable token."""
    # Accept either an already-parsed lxml element or markup text.
    if etree.iselement(html):
        body_el = html
    else:
        body_el = parse_html(html, cleanup=True)
    # Then we split the document into text chunks for each tag, word, and end tag:
    chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
    # Finally re-joining them into token objects:
    return fixup_chunks(chunks)
def parse_html(html, cleanup=True):
    """
    Parses an HTML fragment, returning an lxml element.  Note that the HTML will be
    wrapped in a <div> tag that was not in the original document.

    If cleanup is true, make sure there's no <head> or <body>, and get
    rid of any <ins> and <del> tags.
    """
    if cleanup:
        # This removes any extra markup or structure like <head>:
        html = cleanup_html(html)
    # create_parent=True supplies the wrapping <div> mentioned above.
    return fragment_fromstring(html, create_parent=True)
# Patterns used by cleanup_html() to slice out the <body> contents and to
# strip any pre-existing <ins>/<del> markup.
_body_re = re.compile(r'<body.*?>', re.I|re.S)
_end_body_re = re.compile(r'</body.*?>', re.I|re.S)
_ins_del_re = re.compile(r'</?(ins|del).*?>', re.I|re.S)
def cleanup_html(html):
    """ This 'cleans' the HTML, meaning that any page structure is removed
    (only the contents of <body> are used, if there is any <body).
    Also <ins> and <del> tags are removed.  """
    match = _body_re.search(html)
    if match:
        html = html[match.end():]
    match = _end_body_re.search(html)
    if match:
        html = html[:match.start()]
    # Strip pre-existing <ins>/<del> so they cannot be confused with the
    # markers this module adds.
    html = _ins_del_re.sub('', html)
    return html
# Matches a single trailing whitespace character (used by split_words).
end_whitespace_re = re.compile(r'[ \t\n\r]$')
def fixup_chunks(chunks):
    """
    This function takes a list of chunks and produces a list of tokens.
    """
    tag_accum = []   # start/close tags seen since the last word
    cur_word = None  # the most recently emitted token
    result = []
    for chunk in chunks:
        if isinstance(chunk, tuple):
            # Special chunks produced by flatten_el: images and hrefs.
            if chunk[0] == 'img':
                src = chunk[1]
                tag = chunk[2]
                if tag.endswith(' '):
                    tag = tag[:-1]
                    trailing_whitespace = True
                else:
                    trailing_whitespace = False
                cur_word = tag_token('img', src, html_repr=tag,
                                     pre_tags=tag_accum,
                                     trailing_whitespace=trailing_whitespace)
                tag_accum = []
                result.append(cur_word)
            elif chunk[0] == 'href':
                href = chunk[1]
                cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=True)
                tag_accum = []
                result.append(cur_word)
            continue
        if is_word(chunk):
            if chunk.endswith(' '):
                chunk = chunk[:-1]
                trailing_whitespace = True
            else:
                trailing_whitespace = False
            cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace)
            tag_accum = []
            result.append(cur_word)
        elif is_start_tag(chunk):
            tag_accum.append(chunk)
        elif is_end_tag(chunk):
            if tag_accum:
                tag_accum.append(chunk)
            else:
                # No pending start tags: attach the close tag to the
                # word that precedes it.
                assert cur_word, (
                    "Weird state, cur_word=%r, result=%r, chunks=%r of %r"
                    % (cur_word, result, chunk, chunks))
                cur_word.post_tags.append(chunk)
        else:
            assert(0)
    if not result:
        return [token('', pre_tags=tag_accum)]
    else:
        # Any tags left over trail the final token.
        result[-1].post_tags.extend(tag_accum)
        return result
# All the tags in HTML that don't require end tags:
empty_tags = (
    'param', 'img', 'area', 'br', 'basefont', 'input',
    'base', 'meta', 'link', 'col')

# Elements that form blocks of their own:
block_level_tags = (
    'address',
    'blockquote',
    'center',
    'dir',
    'div',
    'dl',
    'fieldset',
    'form',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'isindex',
    'menu',
    'noframes',
    'noscript',
    'ol',
    'p',
    'pre',
    'table',
    'ul',
    )

# Block-level elements that only appear inside another block container:
block_level_container_tags = (
    'dd',
    'dt',
    'frameset',
    'li',
    'tbody',
    'td',
    'tfoot',
    'th',
    'thead',
    'tr',
    )
def flatten_el(el, include_hrefs, skip_tag=False):
    """ Takes an lxml element el, and generates all the text chunks for
    that tag.  Each start tag is a chunk, each word is a chunk, and each
    end tag is a chunk.

    If skip_tag is true, then the outermost container tag is
    not returned (just its contents)."""
    # html.escape replaces the deprecated cgi.escape, removed in Python
    # 3.13; quote=False matches the old cgi.escape default.
    import html
    if not skip_tag:
        if el.tag == 'img':
            # Images are emitted as a special ('img', src, markup) chunk
            # so they can be diffed like words.
            yield ('img', el.attrib['src'], start_tag(el))
        else:
            yield start_tag(el)
    if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
        return
    start_words = split_words(el.text)
    for word in start_words:
        yield html.escape(word, quote=False)
    for child in el:
        for item in flatten_el(child, include_hrefs=include_hrefs):
            yield item
    if el.tag == 'a' and el.attrib.get('href') and include_hrefs:
        # The href is emitted as its own diffable chunk.
        yield ('href', el.attrib['href'])
    if not skip_tag:
        yield end_tag(el)
        end_words = split_words(el.tail)
        for word in end_words:
            yield html.escape(word, quote=False)
def split_words(text):
    """Split *text* into words, appending one trailing space to each word;
    when the text itself had no trailing whitespace, the final word keeps
    none either.  Returns [] for None/empty/whitespace-only input."""
    if not text or not text.strip():
        return []
    words = []
    for word in text.strip().split():
        words.append(word + ' ')
    if text[-1] not in ' \t\n\r':
        # Original text did not end with whitespace, so neither should
        # the final word.
        words[-1] = words[-1][:-1]
    return words
# Matches a single leading whitespace character (used by end_tag).
start_whitespace_re = re.compile(r'^[ \t\n\r]')
def start_tag(el):
    """
    The text representation of the start tag for *el*, attributes included.

    Attribute values are escaped with quote=True since they are emitted
    inside double quotes.
    """
    # html.escape replaces the deprecated cgi.escape (removed in Python
    # 3.13); quote=True matches cgi.escape(..., True) except that it also
    # escapes "'", which remains valid in a double-quoted attribute.
    import html
    return '<%s%s>' % (
        el.tag, ''.join([' %s="%s"' % (name, html.escape(value, True))
                         for name, value in el.attrib.items()]))
def end_tag(el):
    """The text representation of the end tag for *el*, plus one trailing
    space when the element's tail text begins with whitespace."""
    tail = el.tail
    if tail and tail[0] in ' \t\n\r':
        return '</%s> ' % el.tag
    return '</%s>' % el.tag
def is_word(tok):
    """True for a plain word chunk, i.e. anything not starting with '<'."""
    return tok[:1] != '<'
def is_end_tag(tok):
    """True for a closing-tag chunk ('</...')."""
    return tok[:2] == '</'
def is_start_tag(tok):
    """True for an opening-tag chunk ('<...' but not '</...')."""
    return tok[:1] == '<' and tok[:2] != '</'
def fixup_ins_del_tags(html):
    """ Given an html string, move any <ins> or <del> tags inside of any
    block-level elements, e.g. transform <ins><p>word</p></ins> to
    <p><ins>word</ins></p> """
    # cleanup=False keeps the <ins>/<del> markup we just generated
    # (cleanup_html would strip it).
    doc = parse_html(html, cleanup=False)
    _fixup_ins_del_tags(doc)
    html = serialize_html_fragment(doc, skip_outer=True)
    return html
def serialize_html_fragment(el, skip_outer=False):
    """ Serialize a single lxml element as HTML.  The serialized form
    includes the elements tail.

    If skip_outer is true, then don't serialize the outermost tag
    """
    assert not isinstance(el, basestring), (
        "You should pass in an element, not a string like %r" % el)
    # encoding=_unicode requests a text (not bytes) result from lxml,
    # which the slicing below relies on.
    html = etree.tostring(el, method="html", encoding=_unicode)
    if skip_outer:
        # Get rid of the extra starting tag:
        html = html[html.find('>')+1:]
        # Get rid of the extra end tag:
        html = html[:html.rfind('<')]
        return html.strip()
    else:
        return html
def _fixup_ins_del_tags(doc):
    """fixup_ins_del_tags that works on an lxml document in-place
    """
    for tag in ['ins', 'del']:
        for el in doc.xpath('descendant-or-self::%s' % tag):
            if not _contains_block_level_tag(el):
                # Inline-only content may legally stay inside <ins>/<del>.
                continue
            _move_el_inside_block(el, tag=tag)
            # The original element is now redundant: its content has been
            # re-wrapped, so drop the tag itself.
            el.drop_tag()
            #_merge_element_contents(el)
def _contains_block_level_tag(el):
    """True if *el* or any descendant is a block-level element
    (<p>, <td>, etc.)."""
    if el.tag in block_level_tags or el.tag in block_level_container_tags:
        return True
    return any(_contains_block_level_tag(child) for child in el)
def _move_el_inside_block(el, tag):
    """ helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
    and moves them inside any block-level tags.  """
    for child in el:
        if _contains_block_level_tag(child):
            break
    else:
        # No block-level tags in any child: wrap el's entire content
        # (text plus children) in a single <tag> element.
        # (Removed a stray, unused ``import sys`` that was dead code here.)
        children_tag = etree.Element(tag)
        children_tag.text = el.text
        el.text = None
        children_tag.extend(list(el))
        el[:] = [children_tag]
        return
    # At least one child contains a block-level tag, so handle each
    # child individually, wrapping only inline content in <tag>.
    for child in list(el):
        if _contains_block_level_tag(child):
            _move_el_inside_block(child, tag)
            if child.tail:
                # Wrap the child's tail text in its own <tag> element.
                tail_tag = etree.Element(tag)
                tail_tag.text = child.tail
                child.tail = None
                el.insert(el.index(child)+1, tail_tag)
        else:
            # Inline child: replace it with a <tag> wrapper around it.
            child_tag = etree.Element(tag)
            el.replace(child, child_tag)
            child_tag.append(child)
    if el.text:
        # Wrap the element's leading text as well.
        text_tag = etree.Element(tag)
        text_tag.text = el.text
        el.text = None
        el.insert(0, text_tag)
def _merge_element_contents(el):
    """
    Removes an element, but merges its contents into its place, e.g.,
    given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
    <p>Hi there!</p>

    (Currently unused: the only call site in _fixup_ins_del_tags is
    commented out.)
    """
    parent = el.getparent()
    text = el.text or ''
    if el.tail:
        if not len(el):
            text += el.tail
        else:
            if el[-1].tail:
                el[-1].tail += el.tail
            else:
                el[-1].tail = el.tail
    index = parent.index(el)
    if text:
        # Attach el's leading text to whatever precedes it: the parent's
        # text when el is first, otherwise the previous sibling's tail.
        if index == 0:
            previous = None
        else:
            previous = parent[index-1]
        if previous is None:
            if parent.text:
                parent.text += text
            else:
                parent.text = text
        else:
            if previous.tail:
                previous.tail += text
            else:
                previous.tail = text
    # list(el) replaces the deprecated el.getchildren(); the slice
    # assignment splices el's children into its former position.
    parent[index:index+1] = list(el)
class InsensitiveSequenceMatcher(difflib.SequenceMatcher):
    """
    Acts like SequenceMatcher, but tries not to find very small equal
    blocks amidst large spans of changes
    """

    # Matching blocks of this length or shorter are discarded.
    threshold = 2

    def get_matching_blocks(self):
        # Scale the threshold down for short inputs.  Bug fix: the
        # original computed min(len(self.b), len(self.b)), ignoring the
        # length of ``a`` entirely, so a short ``a`` against a long ``b``
        # had all of its small matches discarded.
        size = min(len(self.a), len(self.b))
        threshold = min(self.threshold, size / 4)
        actual = difflib.SequenceMatcher.get_matching_blocks(self)
        # Keep blocks longer than the threshold, plus the zero-length
        # terminator block that difflib always appends.
        return [item for item in actual
                if item[2] > threshold
                or not item[2]]
if __name__ == '__main__':
    # Allow running this module directly as a command-line diff tool.
    from lxml.html import _diffcommand
    _diffcommand.main()
| apache-2.0 |
scripnichenko/nova | nova/tests/unit/compute/monitors/cpu/test_virt_driver.py | 16 | 2868 | # Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Compute Driver CPU resource monitor."""
from nova.compute.monitors.cpu import virt_driver
from nova import objects
from nova import test
class FakeDriver(object):
    """Stub virt driver that returns one fixed host CPU stats sample."""

    def get_host_cpu_stats(self):
        """Return canned cumulative CPU counters plus a frequency value."""
        sample = {
            'kernel': 5664160000000,
            'idle': 1592705190000000,
            'frequency': 800,
            'user': 26728850000000,
            'iowait': 6121490000000,
        }
        return sample
class FakeResourceTracker(object):
    """Minimal resource-tracker stub; the monitor under test only reads
    the ``driver`` attribute."""
    driver = FakeDriver()
class VirtDriverCPUMonitorTestCase(test.NoDBTestCase):
    """Exercise the virt-driver CPU monitor against canned driver stats."""

    def test_get_metric_names(self):
        # The monitor should expose exactly these ten cpu.* metrics.
        monitor = virt_driver.Monitor(FakeResourceTracker())
        names = monitor.get_metric_names()
        self.assertEqual(10, len(names))
        self.assertIn("cpu.frequency", names)
        self.assertIn("cpu.user.time", names)
        self.assertIn("cpu.kernel.time", names)
        self.assertIn("cpu.idle.time", names)
        self.assertIn("cpu.iowait.time", names)
        self.assertIn("cpu.user.percent", names)
        self.assertIn("cpu.kernel.percent", names)
        self.assertIn("cpu.idle.percent", names)
        self.assertIn("cpu.iowait.percent", names)
        self.assertIn("cpu.percent", names)

    def test_get_metrics(self):
        # Every metric added to the list must be one of the declared
        # names, and the values must match the FakeDriver sample.
        metrics = objects.MonitorMetricList()
        monitor = virt_driver.Monitor(FakeResourceTracker())
        monitor.add_metrics_to_list(metrics)
        names = monitor.get_metric_names()
        for metric in metrics.objects:
            self.assertIn(metric.name, names)

        # Some conversion to a dict to ease testing...
        metrics = {m.name: m.value for m in metrics.objects}
        self.assertEqual(metrics["cpu.frequency"], 800)
        self.assertEqual(metrics["cpu.user.time"], 26728850000000)
        self.assertEqual(metrics["cpu.kernel.time"], 5664160000000)
        self.assertEqual(metrics["cpu.idle.time"], 1592705190000000)
        self.assertEqual(metrics["cpu.iowait.time"], 6121490000000)
        self.assertEqual(metrics["cpu.user.percent"], 1)
        self.assertEqual(metrics["cpu.kernel.percent"], 0)
        self.assertEqual(metrics["cpu.idle.percent"], 97)
        self.assertEqual(metrics["cpu.iowait.percent"], 0)
        self.assertEqual(metrics["cpu.percent"], 2)
| apache-2.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim/household/is_young.py | 2 | 2051 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class is_young(Variable):
    """True when the household head's age is at most the ``young_age``
    constant from the urbansim_constant dataset."""

    # Name of the household attribute this variable depends on.
    age_of_head = "age_of_head"

    def dependencies(self):
        # Only the household's age_of_head attribute is required.
        return [my_attribute_label(self.age_of_head)]

    def compute(self, dataset_pool):
        ages = self.get_dataset().get_attribute(self.age_of_head)
        cutoff = dataset_pool.get_dataset('urbansim_constant')["young_age"]
        return ages <= cutoff
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from numpy import array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
    """Unit test for the urbansim.household.is_young variable."""

    # Fully-qualified name of the variable under test.
    variable_name = "urbansim.household.is_young"

    def test_my_inputs( self ):
        """is_young is 1 exactly when age_of_head <= young_age (here 25)."""
        storage = StorageFactory().get_storage('dict_storage')
        # Four households with heads aged 12, 20, 25 and 30; with a
        # young_age cutoff of 25 the expected result is [1, 1, 1, 0]
        # (the comparison is inclusive).
        storage.write_table(
            table_name='households',
            table_data={
                'household_id': array([1, 2, 3, 4]),
                'age_of_head': array([12, 20, 25, 30]),
            }
        )
        storage.write_table(
            table_name='urbansim_constants',
            table_data={
                'young_age': array([25]),
            }
        )
        dataset_pool = DatasetPool(package_order=['urbansim'],
                                   storage=storage)
        household = dataset_pool.get_dataset('household')
        household.compute_variables(self.variable_name,
                                    dataset_pool=dataset_pool)
        values = household.get_attribute(self.variable_name)
        should_be = array( [1,1,1,0] )
        self.assert_(ma.allequal(values, should_be,),
                     msg="Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | gpl-2.0 |
AndreaOrru/Yugen | key.py | 1 | 1945 | """Representation of keypresses."""
import curses
from curses import ascii
from curses.ascii import isprint
class Key:
    """Class representing a combination of key presses.

    Attributes:
        keys: Dictionary that maps key names to codes.
    """
    # Collect every curses KEY_* constant under its short name
    # (e.g. 'UP' -> curses.KEY_UP).  getattr() replaces the original
    # eval('curses.'+k), which was slower and needlessly risky.
    keys = {k[4:]: getattr(curses, k) for k in dir(curses) if k[:4] == 'KEY_'}
    keys['DEL'] = ascii.DEL
    keys['TAB'] = ascii.TAB

    def __init__(self, key, ctrl=None, meta=None):
        """Initialize a Key object.

        Args:
            key: The integer ASCII value of the character corresponding to the keypress.
                Alternatively, a string in the format C-M-S-k, with C, M, S being optional
                modifier keys (Ctrl, Meta, Shift respectively) and k being a character.
                In the second case, the next arguments are ignored.
            ctrl: Ctrl modifier.
            meta: Meta modifier.
        """
        # key is a string:
        try:
            self.ctrl = 'C-' in key
            self.meta = 'M-' in key
            key = key.upper() if ('S-' in key) else key
            key = key.split('-')[-1]
            self.key = Key.keys[key] if (key in Key.keys) else ord(key)
        # key is an integer ('in' over an int raises TypeError):
        except TypeError:
            self.key = key
            self.ctrl = ctrl
            self.meta = meta

    def is_printable(self):
        """Return True if the key corresponds to a printable character, False otherwise."""
        return not (self.meta or self.ctrl) and isprint(chr(self.key))

    def char(self):
        """Return the character corresponding to the pressed key."""
        return chr(self.key)

    def __eq__(self, o):
        """Check whether two keys are equal."""
        return self.meta == o.meta and self.ctrl == o.ctrl and self.key == o.key

    def __hash__(self):
        """Return a hash value uniquely identifying a Key object."""
        return self.key << 2 | self.ctrl << 1 | self.meta
| bsd-2-clause |
sneharavi12/DeepLearningFinals | pymunk-pymunk-4.0.0/examples/breakout.py | 5 | 6024 | """Very simple breakout clone. A circle shape serves as the paddle, then
breakable bricks constructed of Poly-shapes.
The code showcases several pymunk concepts such as elasitcity, impulses,
constant object speed, joints, collision handlers and post step callbacks.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import math, sys, random
import os
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk
from pymunk import Vec2d
import pymunk.pygame_util
width, height = 600,600
import pymunk._chipmunk as cp
import pymunk._chipmunk_ffi as cpffi
import ctypes as ct
def spawn_ball(space, position, direction):
    """Add a green, constant-speed ball to *space* at *position*, given an
    initial impulse along *direction*."""
    body = pymunk.Body(1, pymunk.inf)
    body.position = position

    shape = pymunk.Circle(body, 5)
    shape.color = THECOLORS["green"]
    shape.elasticity = 1.0

    body.apply_impulse(Vec2d(direction))

    def constant_velocity(ball_body, gravity, damping, dt):
        # Re-normalize each step so collisions never speed the ball up
        # or slow it down.
        ball_body.velocity = ball_body.velocity.normalized() * 400
    body.velocity_func = constant_velocity

    space.add(body, shape)
def setup_level(space, player_body):
    """Reset the play field: clear dynamic objects, serve a ball and lay
    out a fresh wall of bricks."""
    # Remove every non-static body except the player's paddle.
    for shape in space.shapes[:]:
        if not shape.body.is_static and shape.body not in [player_body]:
            space.remove(shape.body, shape)

    # Serve a ball in a random diagonal direction just above the paddle.
    spawn_ball(space, player_body.position + (0, 40),
               random.choice([(1, 1), (-1, 1)]))

    # Lay out a 21-column by 5-row grid of bricks.
    for col in range(0, 21):
        x = col * 20 + 100
        for row in range(0, 5):
            y = row * 10 + 400
            brick_body = pymunk.Body(pymunk.inf, pymunk.inf)
            brick_body.position = x, y
            brick = pymunk.Poly.create_box(brick_body, (20, 10))
            brick.elasticity = 1.0
            brick.color = THECOLORS['blue']
            brick.group = 1
            brick.collision_type = 2
            space.add(brick_body, brick)

    # Bricks disappear once the ball separates from them; removal is
    # deferred to a post-step callback so the space is never mutated
    # mid-step.
    def remove_first(space, arbiter):
        hit_shape = arbiter.shapes[0]
        space.add_post_step_callback(space.remove, hit_shape, hit_shape.body)
    space.add_collision_handler(2, 0, separate=remove_first)
def main():
    """Run the breakout game loop until the window closes or ESC/Q is hit."""
    ### PyGame init
    pygame.init()
    screen = pygame.display.set_mode((width,height))
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.SysFont("Arial", 16)

    ### Physics stuff
    space = pymunk.Space()

    ### Game area
    # walls - the left-top-right walls
    static_lines = [pymunk.Segment(space.static_body, (50, 50), (50, 550), 5)
                ,pymunk.Segment(space.static_body, (50, 550), (550, 550), 5)
                ,pymunk.Segment(space.static_body, (550, 550), (550, 50), 5)
                ]
    for line in static_lines:
        line.color = THECOLORS['lightgray']
        line.elasticity = 1.0
    space.add(static_lines)

    # bottom - a sensor that removes anything touching it
    bottom = pymunk.Segment(space.static_body, (50, 50), (550, 50), 5)
    bottom.sensor = True
    bottom.collision_type = 1
    bottom.color = THECOLORS['red']
    def remove_first(space, arbiter):
        # Defer removal to a post-step callback: mutating the space
        # during the step is unsafe.
        first_shape = arbiter.shapes[0]
        space.add_post_step_callback(space.remove, first_shape, first_shape.body)
        return True
    space.add_collision_handler(0, 1, begin = remove_first)
    space.add(bottom)

    ### Player ship
    player_body = pymunk.Body(500, pymunk.inf)
    player_shape = pymunk.Circle(player_body, 35)
    player_shape.color = THECOLORS["red"]
    player_shape.elasticity = 1.0
    player_body.position = 300,100
    # restrict movement of player to a straight line
    move_joint = pymunk.GrooveJoint(space.static_body, player_body, (100,100), (500,100), (0,0))
    space.add(player_body, player_shape, move_joint)
    # NOTE(review): 'state' is published at module level each frame;
    # no reader is visible in this chunk -- presumably for external
    # inspection/debugging.  Confirm before removing.
    global state

    # Start game
    setup_level(space, player_body)

    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                # P: screenshot of the current frame.
                pygame.image.save(screen, "breakout.png")
            elif event.type == KEYDOWN and event.key == K_LEFT:
                player_body.velocity = (-600,0)
            elif event.type == KEYUP and event.key == K_LEFT:
                player_body.velocity = 0,0
            elif event.type == KEYDOWN and event.key == K_RIGHT:
                player_body.velocity = (600,0)
            elif event.type == KEYUP and event.key == K_RIGHT:
                player_body.velocity = 0,0
            elif event.type == KEYDOWN and event.key == K_r:
                # R: reset the level.
                setup_level(space, player_body)
            elif event.type == KEYDOWN and event.key == K_SPACE:
                spawn_ball(space, player_body.position + (0,40), random.choice([(1,1),(-1,1)]))

        ### Clear screen
        screen.fill(THECOLORS["black"])

        ### Draw stuff
        pymunk.pygame_util.draw(screen, space)

        state = []
        for x in space.shapes:
            s = "%s %s %s" % (x, x.body.position, x.body.velocity)
            state.append(s)

        ### Update physics
        fps = 60
        dt = 1./fps
        space.step(dt)

        ### Info and flip screen
        screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
        screen.blit(font.render("Move with left/right arrows, space to spawn a ball", 1, THECOLORS["darkgrey"]), (5,height - 35))
        screen.blit(font.render("Press R to reset, ESC or Q to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))

        pygame.display.flip()
        clock.tick(fps)

if __name__ == '__main__':
    sys.exit(main())
| mit |
fumitoh/modelx | modelx/tests/core/space/test_space_reload.py | 1 | 1096 | import sys
import os.path
import modelx as mx
import modelx.tests.testdata
import pytest
import pathlib
datadir = pathlib.Path(os.path.dirname(mx.tests.testdata.__file__))
@pytest.fixture
def reloadtest(tmp_path):
    """Yield (model, sample module name, tmp dir) with *tmp_path* made
    importable, and undo the sys.path change on teardown."""
    # Turn the temp directory into an (empty) importable package.
    with open(tmp_path / "__init__.py", "w") as init_file:
        init_file.write("")
    sys.path.insert(0, str(tmp_path))

    yield mx.new_model(), "reloadtest", tmp_path

    # Teardown: drop our sys.path entry unless something else displaced it.
    if sys.path[0] == str(tmp_path):
        del sys.path[0]
def test_space_reload(reloadtest):
    """A space imported from a module picks up edits after reload().

    The test copies the "_before" version of the sample module into the
    importable temp dir, imports it into a space, then overwrites the
    file with the "_after" version and checks that reload() refreshes
    the space's cells.
    """
    import shutil
    model, samplename, tempdir = reloadtest
    sample = str(tempdir.joinpath(samplename + ".py"))
    # Install the initial version of the module.
    shutil.copy(str(datadir.joinpath(samplename + "_before.py")), sample)
    # import reloadtest as src
    import importlib
    src = importlib.import_module(samplename)
    space = model.import_module(module=src)
    # Behavior of the "_before" version.
    assert space.foo(3) == 0
    assert "baz" in space.cells
    # Overwrite with the "_after" version and reload the space.
    shutil.copy(str(datadir.joinpath(samplename + "_after.py")), sample)
    space.reload()
    # Behavior of the "_after" version: foo changed, bar added, baz emptied.
    assert space.foo(3) == 1
    assert space.bar(3) == 1
    assert len(space.baz) == 0
| gpl-3.0 |
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/distutils/dist.py | 175 | 50049 | """distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
__revision__ = "$Id$"
import sys, os, re
from email import message_from_file
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import (DistutilsOptionError, DistutilsArgError,
DistutilsModuleError, DistutilsClassError)
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
('no-user-cfg', None,
'ignore pydistutils.cfg in your home directory'),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = map(lambda x: translate_longopt(x[0]),
display_options)
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
def __init__ (self, attrs=None):
    """Construct a new Distribution instance: initialize all the
    attributes of a Distribution, and then use 'attrs' (a dictionary
    mapping attribute names to values) to assign some of those
    attributes their "real" values.  (Any attributes not mentioned in
    'attrs' will be assigned to some null value: 0, None, an empty list
    or dictionary, etc.)  Most importantly, initialize the
    'command_obj' attribute to the empty dictionary; this will be
    filled in with real command objects by 'parse_command_line()'.
    """

    # Default values for our command-line options
    self.verbose = 1
    self.dry_run = 0
    self.help = 0
    for attr in self.display_option_names:
        setattr(self, attr, 0)

    # Store the distribution meta-data (name, version, author, and so
    # forth) in a separate object -- we're getting to have enough
    # information here (and enough command-line options) that it's
    # worth it.  Also delegate 'get_XXX()' methods to the 'metadata'
    # object in a sneaky and underhanded (but efficient!) way.
    self.metadata = DistributionMetadata()
    for basename in self.metadata._METHOD_BASENAMES:
        method_name = "get_" + basename
        setattr(self, method_name, getattr(self.metadata, method_name))

    # 'cmdclass' maps command names to class objects, so we
    # can 1) quickly figure out which class to instantiate when
    # we need to create a new command object, and 2) have a way
    # for the setup script to override command classes
    self.cmdclass = {}

    # 'command_packages' is a list of packages in which commands
    # are searched for.  The factory for command 'foo' is expected
    # to be named 'foo' in the module 'foo' in one of the packages
    # named here.  This list is searched from the left; an error
    # is raised if no named package provides the command being
    # searched for.  (Always access using get_command_packages().)
    self.command_packages = None

    # 'script_name' and 'script_args' are usually set to sys.argv[0]
    # and sys.argv[1:], but they can be overridden when the caller is
    # not necessarily a setup script run from the command-line.
    self.script_name = None
    self.script_args = None

    # 'command_options' is where we store command options between
    # parsing them (from config files, the command-line, etc.) and when
    # they are actually needed -- ie. when the command in question is
    # instantiated.  It is a dictionary of dictionaries of 2-tuples:
    #   command_options = { command_name : { option : (source, value) } }
    self.command_options = {}

    # 'dist_files' is the list of (command, pyversion, file) that
    # have been created by any dist commands run so far. This is
    # filled regardless of whether the run is dry or not. pyversion
    # gives sysconfig.get_python_version() if the dist file is
    # specific to a Python version, 'any' if it is good for all
    # Python versions on the target platform, and '' for a source
    # file. pyversion should not be used to specify minimum or
    # maximum required Python versions; use the metainfo for that
    # instead.
    self.dist_files = []

    # These options are really the business of various commands, rather
    # than of the Distribution itself.  We provide aliases for them in
    # Distribution as a convenience to the developer.
    self.packages = None
    self.package_data = {}
    self.package_dir = None
    self.py_modules = None
    self.libraries = None
    self.headers = None
    self.ext_modules = None
    self.ext_package = None
    self.include_dirs = None
    self.extra_path = None
    self.scripts = None
    self.data_files = None
    self.password = ''

    # And now initialize bookkeeping stuff that can't be supplied by
    # the caller at all.  'command_obj' maps command names to
    # Command instances -- that's how we enforce that every command
    # class is a singleton.
    self.command_obj = {}

    # 'have_run' maps command names to boolean values; it keeps track
    # of whether we have actually run a particular command, to make it
    # cheap to "run" a command whenever we think we might need to -- if
    # it's already been done, no need for expensive filesystem
    # operations, we just check the 'have_run' dictionary and carry on.
    # It's only safe to query 'have_run' for a command class that has
    # been instantiated -- a false value will be inserted when the
    # command object is created, and replaced with a true value when
    # the command is successfully run.  Thus it's probably best to use
    # '.get()' rather than a straight lookup.
    self.have_run = {}

    # Now we'll use the attrs dictionary (ultimately, keyword args from
    # the setup script) to possibly override any or all of these
    # distribution options.
    if attrs:
        # Pull out the set of command options and work on them
        # specifically.  Note that this order guarantees that aliased
        # command options will override any supplied redundantly
        # through the general options dictionary.
        options = attrs.get('options')
        if options is not None:
            del attrs['options']
            for (command, cmd_options) in options.items():
                opt_dict = self.get_option_dict(command)
                for (opt, val) in cmd_options.items():
                    opt_dict[opt] = ("setup script", val)

        # Backward-compatibility: accept the British spelling but warn.
        if 'licence' in attrs:
            attrs['license'] = attrs['licence']
            del attrs['licence']
            msg = "'licence' distribution option is deprecated; use 'license'"
            if warnings is not None:
                warnings.warn(msg)
            else:
                sys.stderr.write(msg + "\n")

        # Now work on the rest of the attributes.  Any attribute that's
        # not already defined is invalid!
        for (key, val) in attrs.items():
            if hasattr(self.metadata, "set_" + key):
                getattr(self.metadata, "set_" + key)(val)
            elif hasattr(self.metadata, key):
                setattr(self.metadata, key, val)
            elif hasattr(self, key):
                setattr(self, key, val)
            else:
                msg = "Unknown distribution option: %s" % repr(key)
                if warnings is not None:
                    warnings.warn(msg)
                else:
                    sys.stderr.write(msg + "\n")

    # no-user-cfg is handled before other command line args
    # because other args override the config files, and this
    # one is needed before we can load the config files.
    # If attrs['script_args'] wasn't passed, assume false.
    #
    # This also make sure we just look at the global options
    self.want_user_cfg = True

    if self.script_args is not None:
        for arg in self.script_args:
            if not arg.startswith('-'):
                break
            if arg == '--no-user-cfg':
                self.want_user_cfg = False
                break

    self.finalize_options()
def get_option_dict(self, command):
    """Get the option dictionary for a given command.  If that
    command's option dictionary hasn't been created yet, then create it
    and return the new dictionary; otherwise, return the existing
    option dictionary.
    """
    # Renamed the local from 'dict' so it no longer shadows the builtin.
    opt_dict = self.command_options.get(command)
    if opt_dict is None:
        opt_dict = self.command_options[command] = {}
    return opt_dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
    """Announce (for debugging) the option dicts of 'commands'.

    If 'commands' is None, all known command option dicts are dumped in
    sorted order.  'header' is an optional title line; 'indent' is the
    leading whitespace prefix for every announced line.
    """
    from pprint import pformat

    if commands is None:             # dump all command option dicts
        # NOTE(review): Python 2 idiom -- dict.keys() returns a list
        # here, so in-place .sort() is valid.
        commands = self.command_options.keys()
        commands.sort()

    if header is not None:
        self.announce(indent + header)
        indent = indent + " "

    if not commands:
        self.announce(indent + "no commands known yet")
        return

    for cmd_name in commands:
        opt_dict = self.command_options.get(cmd_name)
        if opt_dict is None:
            self.announce(indent +
                          "no option dict for '%s' command" % cmd_name)
        else:
            self.announce(indent +
                          "option dict for '%s' command:" % cmd_name)
            out = pformat(opt_dict)
            for line in out.split('\n'):
                self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
    """Find as many configuration files as should be processed for this
    platform, and return a list of filenames in the order in which they
    should be parsed.  The filenames returned are guaranteed to exist
    (modulo nasty race conditions).

    There are three possible config files: distutils.cfg in the
    Distutils installation directory (ie. where the top-level
    Distutils __inst__.py file lives), a file in the user's home
    directory named .pydistutils.cfg on Unix and pydistutils.cfg
    on Windows/Mac; and setup.cfg in the current directory.

    The file in the user's home directory can be disabled with the
    --no-user-cfg option.
    """
    files = []
    check_environ()

    # Where to look for the system-wide Distutils config file
    sys_dir = os.path.dirname(sys.modules['distutils'].__file__)

    # Look for the system config file
    sys_file = os.path.join(sys_dir, "distutils.cfg")
    if os.path.isfile(sys_file):
        files.append(sys_file)

    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"

    # And look for the user config file
    if self.want_user_cfg:
        user_file = os.path.join(os.path.expanduser('~'), user_filename)
        if os.path.isfile(user_file):
            files.append(user_file)

    # All platforms support local setup.cfg
    local_file = "setup.cfg"
    if os.path.isfile(local_file):
        files.append(local_file)

    if DEBUG:
        self.announce("using config files: %s" % ', '.join(files))

    return files
def parse_config_files(self, filenames=None):
    """Parse each config file in 'filenames' (default: the result of
    find_config_files()) into self.command_options, then apply any
    [global] section values to Distribution attributes.

    Later files override earlier ones for the same option; each option
    value is stored as a (source-filename, value) pair.
    """
    # Python 2 ConfigParser; the module was renamed configparser in 3.x.
    from ConfigParser import ConfigParser

    if filenames is None:
        filenames = self.find_config_files()

    if DEBUG:
        self.announce("Distribution.parse_config_files():")

    parser = ConfigParser()
    for filename in filenames:
        if DEBUG:
            self.announce(" reading %s" % filename)
        parser.read(filename)
        for section in parser.sections():
            options = parser.options(section)
            opt_dict = self.get_option_dict(section)

            for opt in options:
                if opt != '__name__':
                    val = parser.get(section,opt)
                    # Config files use dashes; attributes use underscores.
                    opt = opt.replace('-', '_')
                    opt_dict[opt] = (filename, val)

        # Make the ConfigParser forget everything (so we retain
        # the original filenames that options come from)
        parser.__init__()

    # If there was a "global" section in the config file, use it
    # to set Distribution options.
    if 'global' in self.command_options:
        for (opt, (src, val)) in self.command_options['global'].items():
            alias = self.negative_opt.get(opt)
            try:
                if alias:
                    # Negative alias: store the inverted boolean.
                    setattr(self, alias, not strtobool(val))
                elif opt in ('verbose', 'dry_run'): # ugh!
                    setattr(self, opt, strtobool(val))
                else:
                    setattr(self, opt, val)
            except ValueError, msg:
                raise DistutilsOptionError, msg
# -- Command-line parsing methods ----------------------------------
def parse_command_line(self):
    """Parse the setup script's command line, taken from the
    'script_args' instance attribute (which defaults to 'sys.argv[1:]'
    -- see 'setup()' in core.py).  This list is first processed for
    "global options" -- options that set attributes of the Distribution
    instance.  Then, it is alternately scanned for Distutils commands
    and options for that command.  Each new command terminates the
    options for the previous command.  The allowed options for a
    command are determined by the 'user_options' attribute of the
    command class -- thus, we have to be able to load command classes
    in order to parse the command line.  Any error in that 'options'
    attribute raises DistutilsGetoptError; any error on the
    command-line raises DistutilsArgError.  If no Distutils commands
    were found on the command line, raises DistutilsArgError.  Return
    true if command-line was successfully parsed and we should carry
    on with executing commands; false if no errors but we shouldn't
    execute commands (currently, this only happens if user asks for
    help).
    """
    #
    # We now have enough information to show the Macintosh dialog
    # that allows the user to interactively specify the "command line".
    #
    toplevel_options = self._get_toplevel_options()

    # We have to parse the command line a bit at a time -- global
    # options, then the first command, then its options, and so on --
    # because each command will be handled by a different class, and
    # the options that are valid for a particular class aren't known
    # until we have loaded the command class, which doesn't happen
    # until we know what the command is.
    self.commands = []
    parser = FancyGetopt(toplevel_options + self.display_options)
    parser.set_negative_aliases(self.negative_opt)
    parser.set_aliases({'licence': 'license'})
    args = parser.getopt(args=self.script_args, object=self)
    option_order = parser.get_option_order()
    log.set_verbosity(self.verbose)

    # for display options we return immediately
    if self.handle_display_options(option_order):
        return
    # Consume one command (plus its options) per iteration.
    while args:
        args = self._parse_command_opts(parser, args)
        if args is None:            # user asked for help (and got it)
            return

    # Handle the cases of --help as a "global" option, ie.
    # "setup.py --help" and "setup.py --help command ...".  For the
    # former, we show global options (--verbose, --dry-run, etc.)
    # and display-only options (--name, --version, etc.); for the
    # latter, we omit the display-only options and show help for
    # each command listed on the command line.
    if self.help:
        self._show_help(parser,
                        display_options=len(self.commands) == 0,
                        commands=self.commands)
        return

    # Oops, no commands found -- an end-user error
    if not self.commands:
        raise DistutilsArgError, "no commands supplied"

    # All is well: return true
    return 1
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
def _parse_command_opts(self, parser, args):
    """Parse the command-line options for a single command.
    'parser' must be a FancyGetopt instance; 'args' must be the list
    of arguments, starting with the current command (whose options
    we are about to parse).  Returns a new version of 'args' with
    the next command at the front of the list; will be the empty
    list if there are no more commands on the command line.  Returns
    None if the user asked for help on this command.
    """
    # late import because of mutual dependence between these modules
    from distutils.cmd import Command

    # Pull the current command from the head of the command line
    command = args[0]
    if not command_re.match(command):
        raise SystemExit, "invalid command name '%s'" % command
    self.commands.append(command)

    # Dig up the command class that implements this command, so we
    # 1) know that it's a valid command, and 2) know which options
    # it takes.
    try:
        cmd_class = self.get_command_class(command)
    except DistutilsModuleError, msg:
        raise DistutilsArgError, msg

    # Require that the command class be derived from Command -- want
    # to be sure that the basic "command" interface is implemented.
    if not issubclass(cmd_class, Command):
        raise DistutilsClassError, \
              "command class %s must subclass Command" % cmd_class

    # Also make sure that the command object provides a list of its
    # known options.
    if not (hasattr(cmd_class, 'user_options') and
            isinstance(cmd_class.user_options, list)):
        raise DistutilsClassError, \
              ("command class %s must provide " +
               "'user_options' attribute (a list of tuples)") % \
              cmd_class

    # If the command class has a list of negative alias options,
    # merge it in with the global negative aliases.
    negative_opt = self.negative_opt
    if hasattr(cmd_class, 'negative_opt'):
        # Copy before updating so the shared global mapping is untouched.
        negative_opt = negative_opt.copy()
        negative_opt.update(cmd_class.negative_opt)

    # Check for help_options in command class.  They have a different
    # format (tuple of four) so we need to preprocess them here.
    if (hasattr(cmd_class, 'help_options') and
        isinstance(cmd_class.help_options, list)):
        help_options = fix_help_options(cmd_class.help_options)
    else:
        help_options = []

    # All commands support the global options too, just by adding
    # in 'global_options'.
    parser.set_option_table(self.global_options +
                            cmd_class.user_options +
                            help_options)
    parser.set_negative_aliases(negative_opt)
    (args, opts) = parser.getopt(args[1:])
    if hasattr(opts, 'help') and opts.help:
        self._show_help(parser, display_options=0, commands=[cmd_class])
        return

    # Invoke any per-command help callbacks (e.g. --help-formats) that
    # were requested; if any fired, stop processing this command.
    if (hasattr(cmd_class, 'help_options') and
        isinstance(cmd_class.help_options, list)):
        help_option_found=0
        for (help_option, short, desc, func) in cmd_class.help_options:
            if hasattr(opts, parser.get_attr_name(help_option)):
                help_option_found=1
                if hasattr(func, '__call__'):
                    func()
                else:
                    raise DistutilsClassError(
                        "invalid help function %r for help option '%s': "
                        "must be a callable object (function, etc.)"
                        % (func, help_option))

        if help_option_found:
            return

    # Put the options from the command-line into their official
    # holding pen, the 'command_options' dictionary.
    opt_dict = self.get_option_dict(command)
    for (name, value) in vars(opts).items():
        opt_dict[name] = ("command line", value)

    return args
def finalize_options(self):
    """Set final values for all the options on the Distribution
    instance, analogous to the .finalize_options() method of Command
    objects.
    """
    # 'keywords' and 'platforms' may be supplied as comma-separated
    # strings; normalize them to lists of stripped tokens.  None means
    # "not supplied" and is left alone.
    for attr in ('keywords', 'platforms'):
        value = getattr(self.metadata, attr)
        if value is None:
            continue
        if isinstance(value, str):
            value = [token.strip() for token in value.split(',')]
        setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
               commands=[]):
    """Show help for the setup script command-line in the form of
    several lists of command-line options.  'parser' should be a
    FancyGetopt instance; do not expect it to be returned in the
    same state, as its option table will be reset to make it
    generate the correct help text.

    If 'global_options' is true, lists the global options:
    --verbose, --dry-run, etc.  If 'display_options' is true, lists
    the "display-only" options: --name, --version, etc.  Finally,
    lists per-command help for every command name or command class
    in 'commands'.

    NOTE(review): the mutable default 'commands=[]' is safe here only
    because the argument is never mutated -- keep it that way.
    """
    # late import because of mutual dependence between these modules
    from distutils.core import gen_usage
    from distutils.cmd import Command

    if global_options:
        if display_options:
            options = self._get_toplevel_options()
        else:
            options = self.global_options
        parser.set_option_table(options)
        parser.print_help(self.common_usage + "\nGlobal options:")
        print('')

    if display_options:
        parser.set_option_table(self.display_options)
        parser.print_help(
            "Information display options (just display " +
            "information, ignore any commands)")
        print('')

    for command in self.commands:
        # 'command' may be either a command name or a Command subclass.
        if isinstance(command, type) and issubclass(command, Command):
            klass = command
        else:
            klass = self.get_command_class(command)
        if (hasattr(klass, 'help_options') and
            isinstance(klass.help_options, list)):
            parser.set_option_table(klass.user_options +
                                    fix_help_options(klass.help_options))
        else:
            parser.set_option_table(klass.user_options)
        parser.print_help("Options for '%s' command:" % klass.__name__)
        print('')

    print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
    """If there were any non-global "display-only" options
    (--help-commands or the metadata display options) on the command
    line, display the requested info and return true; else return
    false.
    """
    from distutils.core import gen_usage

    # User just wants a list of commands -- we'll print it out and stop
    # processing now (ie. if they ran "setup --help-commands foo bar",
    # we ignore "foo bar").
    if self.help_commands:
        self.print_commands()
        print('')
        print(gen_usage(self.script_name))
        return 1

    # If user supplied any of the "display metadata" options, then
    # display that metadata in the order in which the user supplied the
    # metadata options.
    any_display_options = 0
    # Map long-option name -> 1 for quick membership tests below.
    is_display_option = {}
    for option in self.display_options:
        is_display_option[option[0]] = 1
    for (opt, val) in option_order:
        if val and is_display_option.get(opt):
            # Normalize '-' to '_' so the getter name is a valid
            # attribute ('home-page' -> get_home_page, etc.).
            opt = translate_longopt(opt)
            value = getattr(self.metadata, "get_"+opt)()
            # List-valued metadata is joined for display; keywords and
            # platforms use commas, the PEP 314 lists one-per-line.
            if opt in ['keywords', 'platforms']:
                print(','.join(value))
            elif opt in ('classifiers', 'provides', 'requires',
                         'obsoletes'):
                print('\n'.join(value))
            else:
                print(value)
            any_display_options = 1
    return any_display_options
def print_command_list(self, commands, header, max_length):
    """Print one section of the command listing (helper for
    'print_commands()').

    'commands' is a list of command names, 'header' the section
    title, and 'max_length' the column width used to align the
    per-command descriptions.
    """
    print(header + ":")
    for name in commands:
        # Prefer a class already registered in 'cmdclass'; fall back
        # to loading the standard command module on demand.
        klass = self.cmdclass.get(name) or self.get_command_class(name)
        description = getattr(klass, 'description',
                              "(no description available)")
        print(" %-*s %s" % (max_length, name, description))
def print_commands(self):
    """Print out a help message listing all available commands with a
    description of each.  The list is divided into "standard commands"
    (listed in distutils.command.__all__) and "extra commands"
    (mentioned in self.cmdclass, but not a standard command).  The
    descriptions come from the command class attribute
    'description'.
    """
    import distutils.command
    std_commands = distutils.command.__all__
    is_std = {}
    for cmd in std_commands:
        is_std[cmd] = 1
    extra_commands = []
    for cmd in self.cmdclass.keys():
        if not is_std.get(cmd):
            extra_commands.append(cmd)
    # Width of the widest command name, for aligned descriptions.
    max_length = 0
    for cmd in (std_commands + extra_commands):
        if len(cmd) > max_length:
            max_length = len(cmd)
    self.print_command_list(std_commands,
                            "Standard commands",
                            max_length)
    if extra_commands:
        # BUG FIX: this was a bare 'print' expression, which under the
        # print() *function* (used everywhere else in this file) just
        # evaluates to the function object and emits nothing -- the
        # intended blank separator line never appeared.
        print('')
        self.print_command_list(extra_commands,
                                "Extra commands",
                                max_length)
def get_command_list(self):
    """Get a list of (command, description) tuples.

    The list is divided into "standard commands" (listed in
    distutils.command.__all__) and "extra commands" (mentioned in
    self.cmdclass, but not a standard command).  The descriptions come
    from the command class attribute 'description'.
    """
    # Currently this is only used on Mac OS, for the Mac-only GUI
    # Distutils interface (by Jack Jansen)
    import distutils.command
    std_commands = distutils.command.__all__
    # Membership map: command name -> 1 for every standard command.
    is_std = {}
    for cmd in std_commands:
        is_std[cmd] = 1
    # Anything registered in cmdclass that is not standard is "extra".
    extra_commands = []
    for cmd in self.cmdclass.keys():
        if not is_std.get(cmd):
            extra_commands.append(cmd)
    rv = []
    for cmd in (std_commands + extra_commands):
        # Prefer an explicitly registered class; otherwise load the
        # standard command module on demand.
        klass = self.cmdclass.get(cmd)
        if not klass:
            klass = self.get_command_class(cmd)
        try:
            description = klass.description
        except AttributeError:
            description = "(no description available)"
        rv.append((cmd, description))
    return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
    """Return the list of packages searched for command modules,
    parsing and caching 'self.command_packages' on first use.
    """
    packages = self.command_packages
    if isinstance(packages, list):
        # Already parsed (and cached) by an earlier call.
        return packages
    raw = packages if packages is not None else ''
    packages = [name.strip() for name in raw.split(',') if name != '']
    # 'distutils.command' is always searched first.
    if "distutils.command" not in packages:
        packages.insert(0, "distutils.command")
    self.command_packages = packages
    return packages
def get_command_class(self, command):
    """Return the class that implements the Distutils command named by
    'command'.  First we check the 'cmdclass' dictionary; if the
    command is mentioned there, we fetch the class object from the
    dictionary and return it.   Otherwise we load the command module
    ("distutils.command." + command) and fetch the command class from
    the module.  The loaded class is also stored in 'cmdclass'
    to speed future calls to 'get_command_class()'.

    Raises DistutilsModuleError if the expected module could not be
    found, or if that module does not define the expected class.
    """
    klass = self.cmdclass.get(command)
    if klass:
        return klass

    for pkgname in self.get_command_packages():
        module_name = "%s.%s" % (pkgname, command)
        klass_name = command

        try:
            __import__(module_name)
            module = sys.modules[module_name]
        except ImportError:
            # Not found in this package -- try the next one.
            continue

        try:
            klass = getattr(module, klass_name)
        except AttributeError:
            # CONSISTENCY FIX: use the call form of raise, matching the
            # final raise below; the old 'raise Exc, value' tuple form
            # is Python-2-only syntax.
            raise DistutilsModuleError(
                "invalid command '%s' (no class '%s' in module '%s')"
                % (command, klass_name, module_name))

        # Cache the loaded class for future lookups.
        self.cmdclass[command] = klass
        return klass

    raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
    """Return the command object for 'command'.  Normally this object
    is cached on a previous call to 'get_command_obj()'; if no command
    object for 'command' is in the cache, then we either create and
    return it (if 'create' is true) or return None.
    """
    cmd_obj = self.command_obj.get(command)
    if not cmd_obj and create:
        if DEBUG:
            self.announce("Distribution.get_command_obj(): " \
                          "creating '%s' command object" % command)

        klass = self.get_command_class(command)
        # Cache the new instance and mark it as not yet run.
        cmd_obj = self.command_obj[command] = klass(self)
        self.have_run[command] = 0

        # Set any options that were supplied in config files
        # or on the command line.  (NB. support for error
        # reporting is lame here: any errors aren't reported
        # until 'finalize_options()' is called, which means
        # we won't report the source of the error.)
        options = self.command_options.get(command)
        if options:
            self._set_command_options(cmd_obj, options)

    return cmd_obj
def _set_command_options(self, command_obj, option_dict=None):
    """Set the options for 'command_obj' from 'option_dict'.  Basically
    this means copying elements of a dictionary ('option_dict') to
    attributes of an instance ('command').

    'command_obj' must be a Command instance.  If 'option_dict' is not
    supplied, uses the standard option dictionary for this command
    (from 'self.command_options').
    """
    command_name = command_obj.get_command_name()
    if option_dict is None:
        option_dict = self.get_option_dict(command_name)

    if DEBUG:
        self.announce("  setting options for '%s' command:" % command_name)
    # Each entry maps option -> (source, value) so errors can say
    # where a bad value came from.
    for (option, (source, value)) in option_dict.items():
        if DEBUG:
            self.announce("    %s = %s (from %s)" % (option, value,
                                                     source))
        try:
            # BUG FIX: materialize as a list -- under Python 3 'map'
            # returns a one-shot iterator, so the membership test
            # below would silently fail from the second option on.
            bool_opts = [translate_longopt(o)
                         for o in command_obj.boolean_options]
        except AttributeError:
            bool_opts = []
        try:
            neg_opt = command_obj.negative_opt
        except AttributeError:
            neg_opt = {}

        try:
            is_string = isinstance(value, str)
            # String values for boolean/negative options come from the
            # config-file parser and must be coerced to booleans.
            if option in neg_opt and is_string:
                setattr(command_obj, neg_opt[option], not strtobool(value))
            elif option in bool_opts and is_string:
                setattr(command_obj, option, strtobool(value))
            elif hasattr(command_obj, option):
                setattr(command_obj, option, value)
            else:
                # CONSISTENCY FIX: call-form raise / 'except ... as',
                # replacing Python-2-only syntax used elsewhere in
                # this otherwise print()-function-style file.
                raise DistutilsOptionError(
                    "error in %s: command '%s' has no such option '%s'"
                    % (source, command_name, option))
        except ValueError as msg:
            raise DistutilsOptionError(msg)
def reinitialize_command(self, command, reinit_subcommands=0):
    """Reinitializes a command to the state it was in when first
    returned by 'get_command_obj()': ie., initialized but not yet
    finalized.  This provides the opportunity to sneak option
    values in programmatically, overriding or supplementing
    user-supplied values from the config files and command line.
    You'll have to re-finalize the command object (by calling
    'finalize_options()' or 'ensure_finalized()') before using it for
    real.

    'command' should be a command name (string) or command object.  If
    'reinit_subcommands' is true, also reinitializes the command's
    sub-commands, as declared by the 'sub_commands' class attribute (if
    it has one).  See the "install" command for an example.  Only
    reinitializes the sub-commands that actually matter, ie. those
    whose test predicates return true.

    Returns the reinitialized command object.
    """
    from distutils.cmd import Command
    if not isinstance(command, Command):
        command_name = command
        command = self.get_command_obj(command_name)
    else:
        command_name = command.get_command_name()

    # Nothing to undo if the command was never finalized.
    if not command.finalized:
        return command
    command.initialize_options()
    command.finalized = 0
    self.have_run[command_name] = 0
    # Re-apply config-file/command-line options on top of the fresh
    # defaults (callers may then override programmatically).
    self._set_command_options(command)

    if reinit_subcommands:
        for sub in command.get_sub_commands():
            self.reinitialize_command(sub, reinit_subcommands)

    return command
# -- Methods that operate on the Distribution ----------------------
def announce(self, msg, level=log.INFO):
    # Forward to the distutils logger at the requested level.
    log.log(level, msg)

def run_commands(self):
    """Run each command that was seen on the setup script command line.
    Uses the list of commands found and cache of command objects
    created by 'get_command_obj()'.
    """
    for cmd in self.commands:
        self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
def run_command(self, command):
    """Do whatever it takes to run a command (including nothing at all,
    if the command has already been run).  Specifically: if we have
    already created and run the command named by 'command', return
    silently without doing anything.  If the command named by 'command'
    doesn't even have a command object yet, create one.  Then invoke
    'run()' on that command object (or an existing one).
    """
    # Already been here, done that? then return silently.
    if self.have_run.get(command):
        return

    log.info("running %s", command)
    cmd_obj = self.get_command_obj(command)
    # Options must be finalized before run(); ensure_finalized() is
    # idempotent, so this is safe even if a caller already did it.
    cmd_obj.ensure_finalized()
    cmd_obj.run()
    self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
    # True when the distribution ships pure-Python packages or modules.
    return len(self.packages or self.py_modules or []) > 0

def has_ext_modules(self):
    # NB: returns the falsy attribute itself (None/[]), not False,
    # when unset -- callers rely only on truthiness.
    return self.ext_modules and len(self.ext_modules) > 0

def has_c_libraries(self):
    return self.libraries and len(self.libraries) > 0

def has_modules(self):
    # Any importable code at all, pure or compiled.
    return self.has_pure_modules() or self.has_ext_modules()

def has_headers(self):
    return self.headers and len(self.headers) > 0

def has_scripts(self):
    return self.scripts and len(self.scripts) > 0

def has_data_files(self):
    return self.data_files and len(self.data_files) > 0

def is_pure(self):
    # Pure distributions contain Python code only -- no extension
    # modules and no C libraries to compile.
    return (self.has_pure_modules() and
            not self.has_ext_modules() and
            not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.
    """

    # Basenames for which get_XXX accessors exist; per the note above,
    # Distribution's constructor re-binds these getters onto itself.
    # NOTE(review): "license" appears twice in this tuple (upstream
    # distutils lists the "licence" alias in one of the slots) --
    # harmless for lookups, but confirm against the intended upstream.
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "license", "classifiers",
                         "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )
def __init__(self, path=None):
if path is not None:
self.read_pkg_file(open(path))
else:
self.name = None
self.version = None
self.author = None
self.author_email = None
self.maintainer = None
self.maintainer_email = None
self.url = None
self.license = None
self.description = None
self.long_description = None
self.keywords = None
self.platforms = None
self.classifiers = None
self.download_url = None
# PEP 314
self.provides = None
self.requires = None
self.obsoletes = None
def read_pkg_file(self, file):
"""Reads the metadata values from a file object."""
msg = message_from_file(file)
def _read_field(name):
value = msg[name]
if value == 'UNKNOWN':
return None
return value
def _read_list(name):
values = msg.get_all(name, None)
if values == []:
return None
return values
metadata_version = msg['metadata-version']
self.name = _read_field('name')
self.version = _read_field('version')
self.description = _read_field('summary')
# we are filling author only.
self.author = _read_field('author')
self.maintainer = None
self.author_email = _read_field('author-email')
self.maintainer_email = None
self.url = _read_field('home-page')
self.license = _read_field('license')
if 'download-url' in msg:
self.download_url = _read_field('download-url')
else:
self.download_url = None
self.long_description = _read_field('description')
self.description = _read_field('summary')
if 'keywords' in msg:
self.keywords = _read_field('keywords').split(',')
self.platforms = _read_list('platform')
self.classifiers = _read_list('classifier')
# PEP 314 - these fields only exist in 1.1
if metadata_version == '1.1':
self.requires = _read_list('requires')
self.provides = _read_list('provides')
self.obsoletes = _read_list('obsoletes')
else:
self.requires = None
self.provides = None
self.obsoletes = None
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
try:
self.write_pkg_file(pkg_info)
finally:
pkg_info.close()
def write_pkg_file(self, file):
    """Write the PKG-INFO format data to a file object.
    """
    # Metadata-Version 1.1 is only declared when PEP 314 fields or
    # the 1.1-era additions (classifiers, download URL) are present.
    version = '1.0'
    if (self.provides or self.requires or self.obsoletes or
        self.classifiers or self.download_url):
        version = '1.1'

    self._write_field(file, 'Metadata-Version', version)
    self._write_field(file, 'Name', self.get_name())
    self._write_field(file, 'Version', self.get_version())
    self._write_field(file, 'Summary', self.get_description())
    self._write_field(file, 'Home-page', self.get_url())
    self._write_field(file, 'Author', self.get_contact())
    self._write_field(file, 'Author-email', self.get_contact_email())
    self._write_field(file, 'License', self.get_license())
    if self.download_url:
        self._write_field(file, 'Download-URL', self.download_url)

    # Escape newlines so the multi-line description survives the
    # RFC-822-style header format.
    long_desc = rfc822_escape(self.get_long_description())
    self._write_field(file, 'Description', long_desc)

    keywords = ','.join(self.get_keywords())
    if keywords:
        self._write_field(file, 'Keywords', keywords)

    # Multi-valued fields: one header line per element.
    self._write_list(file, 'Platform', self.get_platforms())
    self._write_list(file, 'Classifier', self.get_classifiers())

    # PEP 314
    self._write_list(file, 'Requires', self.get_requires())
    self._write_list(file, 'Provides', self.get_provides())
    self._write_list(file, 'Obsoletes', self.get_obsoletes())
def _write_field(self, file, name, value):
    # Emit a single "Name: value" PKG-INFO header line.
    file.write('%s: %s\n' % (name, self._encode_field(value)))

def _write_list (self, file, name, values):
    # Emit one header line per element (multi-valued fields).
    for value in values:
        self._write_field(file, name, value)

def _encode_field(self, value):
    # Encode text for the PKG-INFO stream; None passes through.
    # NOTE(review): 'unicode' is a Python-2-only builtin -- under
    # Python 3 any non-None value raises NameError here.  Confirm the
    # target interpreter version before reusing this module.
    if value is None:
        return None
    if isinstance(value, unicode):
        return value.encode(PKG_INFO_ENCODING)
    return str(value)
# -- Metadata query methods ----------------------------------------
# Each getter substitutes a conventional placeholder ("UNKNOWN",
# "0.0.0", or an empty list) when the underlying field is unset.

def get_name(self):
    return self.name or "UNKNOWN"

def get_version(self):
    return self.version or "0.0.0"

def get_fullname(self):
    # "<name>-<version>", e.g. for archive and directory names.
    return "%s-%s" % (self.get_name(), self.get_version())

def get_author(self):
    return self._encode_field(self.author) or "UNKNOWN"

def get_author_email(self):
    return self.author_email or "UNKNOWN"

def get_maintainer(self):
    return self._encode_field(self.maintainer) or "UNKNOWN"

def get_maintainer_email(self):
    return self.maintainer_email or "UNKNOWN"

def get_contact(self):
    # Maintainer takes precedence over author when both are set.
    return (self._encode_field(self.maintainer) or
            self._encode_field(self.author) or "UNKNOWN")

def get_contact_email(self):
    return self.maintainer_email or self.author_email or "UNKNOWN"

def get_url(self):
    return self.url or "UNKNOWN"

def get_license(self):
    return self.license or "UNKNOWN"
# British-spelling alias kept for backward compatibility.
get_licence = get_license

def get_description(self):
    return self._encode_field(self.description) or "UNKNOWN"

def get_long_description(self):
    return self._encode_field(self.long_description) or "UNKNOWN"

def get_keywords(self):
    return self.keywords or []

def get_platforms(self):
    return self.platforms or ["UNKNOWN"]

def get_classifiers(self):
    return self.classifiers or []

def get_download_url(self):
    return self.download_url or "UNKNOWN"
# PEP 314
def get_requires(self):
    """Return the 'requires' list (empty list when unset)."""
    return self.requires or []

def set_requires(self, value):
    """Validate and store the 'requires' list; every entry must be a
    valid version predicate (VersionPredicate raises ValueError
    otherwise)."""
    import distutils.versionpredicate
    for v in value:
        distutils.versionpredicate.VersionPredicate(v)
    self.requires = value

def get_provides(self):
    """Return the 'provides' list (empty list when unset)."""
    return self.provides or []

def set_provides(self, value):
    """Validate and store the 'provides' list (entries are stripped
    before validation)."""
    value = [v.strip() for v in value]
    # CONSISTENCY FIX: import once before the loop, as the sibling
    # setters do -- the import statement previously sat inside the
    # loop and was re-executed for every element.
    import distutils.versionpredicate
    for v in value:
        distutils.versionpredicate.split_provision(v)
    self.provides = value

def get_obsoletes(self):
    """Return the 'obsoletes' list (empty list when unset)."""
    return self.obsoletes or []

def set_obsoletes(self, value):
    """Validate and store the 'obsoletes' list; every entry must be a
    valid version predicate."""
    import distutils.versionpredicate
    for v in value:
        distutils.versionpredicate.VersionPredicate(v)
    self.obsoletes = value
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt.

    The fourth element (the callable invoked for the option) is simply
    dropped.
    """
    return [help_tuple[0:3] for help_tuple in options]
| gpl-2.0 |
vetal4444/python-goose | tests/extractors/title.py | 13 | 1268 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from base import TestExtractionBase
class TestTitle(TestExtractionBase):
    """Extraction tests for article titles.

    The HTML fixture and expected values are resolved by
    TestExtractionBase from each test method's name.
    """

    def test_title_opengraph(self):
        # Title supplied via OpenGraph <meta> tags in the fixture.
        article = self.getArticle()
        fields = ['title']
        self.runArticleAssertions(article=article, fields=fields)

    def test_title_empty(self):
        # NOTE(review): body is identical to test_title_opengraph; the
        # fixture is selected by the test name (presumably an article
        # with a missing/empty title) -- confirm the fixture exists.
        article = self.getArticle()
        fields = ['title']
        self.runArticleAssertions(article=article, fields=fields)
| apache-2.0 |
munyirik/python | cpython/Lib/distutils/tests/test_install.py | 12 | 8346 | """Tests for distutils.command.install."""
import os
import sys
import unittest
import site
from test.support import captured_stdout, run_unittest
from distutils import sysconfig
from distutils.command.install import install
from distutils.command import install as install_module
from distutils.command.build_ext import build_ext
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Distribution
from distutils.errors import DistutilsOptionError
from distutils.extension import Extension
from distutils.tests import support
def _make_ext_name(modname):
    # Append the platform-specific extension-module suffix so tests
    # can predict the filename of a built extension module.
    return modname + sysconfig.get_config_var('EXT_SUFFIX')
class InstallTestCase(support.TempdirManager,
                      support.EnvironGuard,
                      support.LoggingSilencer,
                      unittest.TestCase):
    """Unit tests for the distutils 'install' command."""

    def test_home_installation_scheme(self):
        # This ensure two things:
        # - that --home generates the desired set of directory names
        # - test --home is supported on all platforms
        builddir = self.mkdtemp()
        destination = os.path.join(builddir, "installation")

        dist = Distribution({"name": "foopkg"})
        # script_name need not exist, it just need to be initialized
        dist.script_name = os.path.join(builddir, "setup.py")
        dist.command_obj["build"] = support.DummyCommand(
            build_base=builddir,
            build_lib=os.path.join(builddir, "lib"),
            )

        cmd = install(dist)
        cmd.home = destination
        cmd.ensure_finalized()

        self.assertEqual(cmd.install_base, destination)
        self.assertEqual(cmd.install_platbase, destination)

        def check_path(got, expected):
            # Normalize so the comparison is platform-independent.
            got = os.path.normpath(got)
            expected = os.path.normpath(expected)
            self.assertEqual(got, expected)

        libdir = os.path.join(destination, "lib", "python")
        check_path(cmd.install_lib, libdir)
        check_path(cmd.install_platlib, libdir)
        check_path(cmd.install_purelib, libdir)
        check_path(cmd.install_headers,
                   os.path.join(destination, "include", "python", "foopkg"))
        check_path(cmd.install_scripts, os.path.join(destination, "bin"))
        check_path(cmd.install_data, destination)

    def test_user_site(self):
        # test install with --user
        # preparing the environment for the test
        self.old_user_base = site.USER_BASE
        self.old_user_site = site.USER_SITE
        self.tmpdir = self.mkdtemp()
        self.user_base = os.path.join(self.tmpdir, 'B')
        self.user_site = os.path.join(self.tmpdir, 'S')
        site.USER_BASE = self.user_base
        site.USER_SITE = self.user_site
        install_module.USER_BASE = self.user_base
        install_module.USER_SITE = self.user_site

        def _expanduser(path):
            # Keep expanduser inside the sandboxed tmpdir.
            return self.tmpdir
        self.old_expand = os.path.expanduser
        os.path.expanduser = _expanduser

        def cleanup():
            # Restore every global this test patched.
            site.USER_BASE = self.old_user_base
            site.USER_SITE = self.old_user_site
            install_module.USER_BASE = self.old_user_base
            install_module.USER_SITE = self.old_user_site
            os.path.expanduser = self.old_expand

        self.addCleanup(cleanup)

        for key in ('nt_user', 'unix_user'):
            self.assertIn(key, INSTALL_SCHEMES)

        dist = Distribution({'name': 'xx'})
        cmd = install(dist)

        # making sure the user option is there
        # (typo fix: local loop variable 'lable' renamed to 'label')
        options = [name for name, short, label in
                   cmd.user_options]
        self.assertIn('user', options)

        # setting a value
        cmd.user = 1

        # user base and site shouldn't be created yet
        self.assertFalse(os.path.exists(self.user_base))
        self.assertFalse(os.path.exists(self.user_site))

        # let's run finalize
        cmd.ensure_finalized()

        # now they should
        self.assertTrue(os.path.exists(self.user_base))
        self.assertTrue(os.path.exists(self.user_site))

        self.assertIn('userbase', cmd.config_vars)
        self.assertIn('usersite', cmd.config_vars)

    def test_handle_extra_path(self):
        dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
        cmd = install(dist)

        # two elements
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, ['path', 'dirs'])
        self.assertEqual(cmd.extra_dirs, 'dirs')
        self.assertEqual(cmd.path_file, 'path')

        # one element
        cmd.extra_path = ['path']
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, ['path'])
        self.assertEqual(cmd.extra_dirs, 'path')
        self.assertEqual(cmd.path_file, 'path')

        # none
        dist.extra_path = cmd.extra_path = None
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, None)
        self.assertEqual(cmd.extra_dirs, '')
        self.assertEqual(cmd.path_file, None)

        # three elements (no way !)
        cmd.extra_path = 'path,dirs,again'
        self.assertRaises(DistutilsOptionError, cmd.handle_extra_path)

    def test_finalize_options(self):
        dist = Distribution({'name': 'xx'})
        cmd = install(dist)

        # must supply either prefix/exec-prefix/home or
        # install-base/install-platbase -- not both
        cmd.prefix = 'prefix'
        cmd.install_base = 'base'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)

        # must supply either home or prefix/exec-prefix -- not both
        cmd.install_base = None
        cmd.home = 'home'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)

        # can't combine user with prefix/exec_prefix/home or
        # install_(plat)base
        cmd.prefix = None
        cmd.user = 'user'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)

    def test_record(self):
        # The --record option must list every installed file.
        install_dir = self.mkdtemp()
        project_dir, dist = self.create_dist(py_modules=['hello'],
                                             scripts=['sayhi'])
        os.chdir(project_dir)
        self.write_file('hello.py', "def main(): print('o hai')")
        self.write_file('sayhi', 'from hello import main; main()')

        cmd = install(dist)
        dist.command_obj['install'] = cmd
        cmd.root = install_dir
        cmd.record = os.path.join(project_dir, 'filelist')
        cmd.ensure_finalized()
        cmd.run()

        # IDIOM FIX: 'with' replaces the manual try/finally close.
        with open(cmd.record) as f:
            content = f.read()

        found = [os.path.basename(line) for line in content.splitlines()]
        expected = ['hello.py', 'hello.%s.pyc' % sys.implementation.cache_tag,
                    'sayhi',
                    'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
        self.assertEqual(found, expected)

    def test_record_extensions(self):
        # Same as test_record, but for a compiled extension module.
        install_dir = self.mkdtemp()
        project_dir, dist = self.create_dist(ext_modules=[
            Extension('xx', ['xxmodule.c'])])
        os.chdir(project_dir)
        support.copy_xxmodule_c(project_dir)

        buildextcmd = build_ext(dist)
        support.fixup_build_ext(buildextcmd)
        buildextcmd.ensure_finalized()

        cmd = install(dist)
        dist.command_obj['install'] = cmd
        dist.command_obj['build_ext'] = buildextcmd
        cmd.root = install_dir
        cmd.record = os.path.join(project_dir, 'filelist')
        cmd.ensure_finalized()
        cmd.run()

        # IDIOM FIX: 'with' replaces the manual try/finally close.
        with open(cmd.record) as f:
            content = f.read()

        found = [os.path.basename(line) for line in content.splitlines()]
        expected = [_make_ext_name('xx'),
                    'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
        self.assertEqual(found, expected)

    def test_debug_mode(self):
        # this covers the code called when DEBUG is set
        old_logs_len = len(self.logs)
        install_module.DEBUG = True
        try:
            with captured_stdout():
                self.test_record()
        finally:
            install_module.DEBUG = False
        self.assertGreater(len(self.logs), old_logs_len)
def test_suite():
    # Collect every InstallTestCase test for the distutils test runner.
    return unittest.makeSuite(InstallTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| bsd-3-clause |
astaff/ansible | test/units/parsing/test_data_loader.py | 64 | 3283 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping
class TestDataLoader(unittest.TestCase):
    """Tests for DataLoader parsing of JSON and YAML file contents."""

    def setUp(self):
        # FIXME: need to add tests that utilize vault_password
        self._loader = DataLoader()

    def tearDown(self):
        pass

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_json_from_file(self, mock_def):
        # _get_file_contents returns a (contents, show_content) pair.
        mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True)
        output = self._loader.load_from_file('dummy_json.txt')
        self.assertEqual(output, dict(a=1,b=2,c=3))

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_yaml_from_file(self, mock_def):
        mock_def.return_value = ("""
        a: 1
        b: 2
        c: 3
        """, True)
        output = self._loader.load_from_file('dummy_yaml.txt')
        self.assertEqual(output, dict(a=1,b=2,c=3))

    @patch.object(DataLoader, '_get_file_contents')
    def test_parse_fail_from_file(self, mock_def):
        # Invalid YAML must surface as AnsibleParserError, not a raw
        # scanner error.
        mock_def.return_value = ("""
        TEXT:
            ***
               NOT VALID
        """, True)
        self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
class TestDataLoaderWithVault(unittest.TestCase):
    """Tests for DataLoader decryption of vault-encrypted files."""

    def setUp(self):
        self._loader = DataLoader()
        # Password matching the fixture ciphertext below.
        self._loader.set_vault_password('ansible')

    def tearDown(self):
        pass

    @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
    def test_parse_from_vault_1_1_file(self):
        # Vault 1.1 payload encrypting the YAML document {foo: bar}.
        vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
        # mock_open must patch the right builtins module name per
        # interpreter major version.
        if PY3:
            builtins_name = 'builtins'
        else:
            builtins_name = '__builtin__'
        with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
            output = self._loader.load_from_file('dummy_vault.txt')
            self.assertEqual(output, dict(foo='bar'))
| gpl-3.0 |
tsrnnash/bg8-cdw11 | static/plugin/liquid_tags/gram.py | 271 | 3656 | """
Instagram Image Tag
-------------------
By `Tom Spalding <https://github.com/digitalvapor>`_
You can see a working example at `antivapor.net/instagram-tag.html <http://antivapor.net/instagram-tag.html>`_.
Based on `Liquid Image Tag <https://github.com/getpelican/pelican-plugins/blob/master/liquid_tags/img.py>`_ by `Jake Vanderplas <https://github.com/jakevdp>`_.
Optional Todo:
* Query JSON to automatically include descriptions.
http://api.instagram.com/oembed?url=http://instagr.am/p/olw8jXiz1_/
and option to add wrapping anchor link to original http://instagram.com/p/olw8jXiz1_
* Default to size m
http://instagr.am/p/olw8jXiz1_/media/?size=t
http://instagr.am/p/olw8jXiz1_/media
* Provide examples using with [Better Figures and Images](https://github.com/getpelican/pelican-plugins/tree/master/better_figures_and_images).
Syntax
------
{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}
where size is t, m, or l, and it defaults to m. see http://instagram.com/developer/embedding.
Examples
--------
{% gram pFG7naIZkr t %}
{% gram pFJE11IZnx %}
{% gram pFI0CAIZna l 400 figure 'pretty turkey tail fungus' %}
{% gram rOru21oZpe l 450 test_class instagram 'warehouse window title' 'alt text' %}
Output
------
<img src="http://photos-c.ak.instagram.com/hphotos-ak-xaf1/t51.2885-15/917172_604907902963826_254280879_n.jpg" width="450" title="warehouse window title" alt="alt text" class="test_class instagram">
"""
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from .mdx_liquid_tags import LiquidTags
# Usage string shown when the tag markup cannot be parsed.
SYNTAX = '{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}'

# Regular expression for full syntax
# ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>\S*))?(?P<title>\s+.+)?""")
# Named groups: shortcode (required), then optional size (t/m/l),
# width (digits), class (anything up to a quote), and trailing title.
ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>[^']*))?(?P<title>.+)?""")

# Regular expression to split the title and alt text
# (two quoted strings: "title" "alt", either quote style).
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
@LiquidTags.register('gram')
def gram(preprocessor, tag, markup):
    """Render an Instagram {% gram ... %} liquid tag as an <img> element.

    'markup' is the raw tag body; the resolved media URL becomes the
    img src and any remaining parsed attributes (width, class, title,
    alt) are emitted as HTML attributes.
    """
    attrs = None

    # Parse the markup string
    match = ReGram.search(markup)
    if match:
        # Keep only the named groups that actually matched.
        attrs = dict([(key, val.strip())
                      for (key, val) in match.groupdict().items() if val])
    else:
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))

    # Construct URI
    #print(attrs)
    shortcode = attrs['shortcode']
    url = 'http://instagr.am/p/'+shortcode+'/media/'
    del attrs['shortcode']

    # Optional size selector maps to Instagram's ?size= query param.
    if 'size' in attrs:
        size = '?size={0}'.format(attrs['size'])
        url = url+size
        del attrs['size']

    r = urlopen(url)
    # NOTE(review): under Python 3's urllib.request, urlopen() raises
    # HTTPError on a 404 before this check runs, so this branch is
    # only reachable with the Python 2 urllib import above -- confirm
    # the intended behavior when porting.
    if(r.getcode()==404):
        raise ValueError('%s isnt a photo.'%shortcode)
    # Follow redirects to the final CDN image URL.
    gram_url = r.geturl()

    # Check if alt text is present -- if so, split it from title
    if 'title' in attrs:
        match = ReTitleAlt.search(attrs['title'])
        if match:
            attrs.update(match.groupdict())
        if not attrs.get('alt'):
            # No explicit alt text: reuse the title.
            attrs['alt'] = attrs['title']

    #print('updated dict: '+repr(attrs))

    # Return the formatted text
    return '<img src="{0}"{1}>'.format(gram_url,' '.join(' {0}="{1}"'.format(key,val) for (key,val) in attrs.items()))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
BAMitUp/Fantasy-Football-Shuffler | ENV/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | 6090 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
    """ Lockfile implemented as a Unix PID file.

    The lock file is a normal file named by the attribute `path`.
    A lock's PID file contains a single line of text, containing
    the process ID (PID) of the process that acquired the lock.

    >>> lock = PIDLockFile('somefile')
    >>> lock = PIDLockFile('somefile')
    """

    def __init__(self, path, threaded=False, timeout=None):
        # pid lockfiles don't support threaded operation, so always force
        # False as the threaded arg.
        LockBase.__init__(self, path, False, timeout)
        # The PID file itself is the lock token, so it doubles as the
        # unique name.
        self.unique_name = self.path

    def read_pid(self):
        """ Get the PID from the lock file.
            """
        return read_pid_from_pidfile(self.path)

    def is_locked(self):
        """ Test if the lock is currently held.

            The lock is held if the PID file for this lock exists.
            """
        return os.path.exists(self.path)

    def i_am_locking(self):
        """ Test if the lock is held by the current process.

            Returns ``True`` if the current process ID matches the
            number stored in the PID file.
            """
        return self.is_locked() and os.getpid() == self.read_pid()

    def acquire(self, timeout=None):
        """ Acquire the lock.

            Creates the PID file for this lock, or raises an error if
            the lock could not be acquired.

            A per-call ``timeout`` overrides the instance timeout.
            Raises LockTimeout when a positive timeout expires, or
            AlreadyLocked when the lock is held and no positive timeout
            was given.
            """
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            try:
                # O_EXCL file creation makes this an atomic test-and-set
                # (see write_pid_to_pidfile).
                write_pid_to_pidfile(self.path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # The lock creation failed. Maybe sleep a bit.
                    if time.time() > end_time:
                        if timeout is not None and timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll interval: timeout/10 when a timeout was given,
                    # else 0.1s. NOTE(review): on Python 2 an *integer*
                    # timeout < 10 makes ``timeout / 10`` truncate to 0,
                    # so the 0.1s fallback is used -- confirm intended.
                    time.sleep(timeout is not None and timeout / 10 or 0.1)
                else:
                    # Any other OS error (permissions, missing directory,
                    # ...) means the lock can never be taken this way.
                    raise LockFailed("failed to create %s" % self.path)
            else:
                return

    def release(self):
        """ Release the lock.

            Removes the PID file to release the lock, or raises an
            error if the current process does not hold the lock.
            """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        remove_existing_pidfile(self.path)

    def break_lock(self):
        """ Break an existing lock.

            Removes the PID file if it already exists, otherwise does
            nothing.
            """
        remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

        Read and return the numeric PID recorded as text in the named
        PID file. If the PID file cannot be read, or if the content is
        not a valid PID, return ``None``.
        """
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        return None
    # The ``with`` block guarantees the file is closed even if reading
    # raises, which the original open/readline/close sequence did not.
    with pidfile:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character.
        #
        # Programs that read PID files should be somewhat flexible
        # in what they accept; i.e., they should ignore extra
        # whitespace, leading zeroes, absence of the trailing
        # newline, or additional lines in the PID file.
        line = pidfile.readline().strip()
    try:
        return int(line)
    except ValueError:
        # Content is not a valid integer PID.
        return None
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

        Get the numeric process ID ("PID") of the current process
        and write it to the named file as a line of text.

        Raises OSError with ``errno.EEXIST`` if the file already exists;
        ``PIDLockFile.acquire`` relies on that to detect a held lock, so
        the error must propagate unchanged.
        """
    # O_EXCL makes the create atomic: exactly one process can win.
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    try:
        pidfile = os.fdopen(pidfile_fd, 'w')
    except Exception:
        # fdopen failed; don't leak the raw file descriptor.
        os.close(pidfile_fd)
        raise
    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character. For
    #   example, if crond was process number 25, /var/run/crond.pid
    #   would contain three characters: two, five, and newline.
    with pidfile:
        pidfile.write("%s\n" % os.getpid())
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

        A missing file already satisfies the desired end state, so a
        "no such file" error is deliberately ignored; any other OS
        error is propagated to the caller.
        """
    try:
        os.remove(pidfile_path)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
| gpl-3.0 |
1013553207/django | tests/template_tests/filter_tests/test_linenumbers.py | 331 | 1992 | from django.template.defaultfilters import linenumbers
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinenumbersTests(SimpleTestCase):
    """
    The contents of "linenumbers" is escaped according to the current
    autoescape setting.
    """

    @setup({'linenumbers01': '{{ a|linenumbers }} {{ b|linenumbers }}'})
    def test_linenumbers01(self):
        # Autoescape on (default): 'a' is a plain string, 'b' is mark_safe.
        # NOTE(review): with autoescape enabled the plain-string output
        # would normally contain HTML-escaped markup; the literal `<two>`
        # below looks like entity-decoding corruption in this copy of the
        # file -- verify against the upstream Django test suite.
        output = self.engine.render_to_string(
            'linenumbers01',
            {'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
        )
        self.assertEqual(output, '1. one\n2. <two>\n3. three '
                                 '1. one\n2. <two>\n3. three')

    @setup({'linenumbers02':
            '{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}'})
    def test_linenumbers02(self):
        # Autoescape off: both values are expected to pass through as-is.
        output = self.engine.render_to_string(
            'linenumbers02',
            {'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
        )
        self.assertEqual(output, '1. one\n2. <two>\n3. three '
                                 '1. one\n2. <two>\n3. three')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the ``linenumbers`` filter function."""

    def test_linenumbers(self):
        self.assertEqual(linenumbers('line 1\nline 2'), '1. line 1\n2. line 2')

    def test_linenumbers2(self):
        # Line numbers are zero-padded to the width of the largest number
        # (here two digits for 10 lines).
        self.assertEqual(
            linenumbers('\n'.join(['x'] * 10)),
            '01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. x\n08. x\n09. x\n10. x',
        )

    def test_non_string_input(self):
        # Non-string input is numbered as its string form.
        self.assertEqual(linenumbers(123), '1. 123')

    def test_autoescape(self):
        # NOTE(review): with autoescape on, the expected value would
        # normally contain escaped markup; the literal `<a>` tags below may
        # be entity-decoding corruption in this copy -- confirm upstream.
        self.assertEqual(
            linenumbers('foo\n<a>bar</a>\nbuz'),
            '1. foo\n2. <a>bar</a>\n3. buz',
        )

    def test_autoescape_off(self):
        self.assertEqual(
            linenumbers('foo\n<a>bar</a>\nbuz', autoescape=False),
            '1. foo\n2. <a>bar</a>\n3. buz'
        )
| bsd-3-clause |
mjafin/bcbio-nextgen | bcbio/distributed/multitasks.py | 1 | 7491 | """Multiprocessing ready entry points for sample analysis.
"""
from bcbio import heterogeneity, hla, structural, utils, chipseq, upload
from bcbio.bam import callable
from bcbio.srna import sample as srna
from bcbio.srna import group as seqcluster
from bcbio.chipseq import peaks
from bcbio.cwl import create as cwl_create
from bcbio.rnaseq import (sailfish, rapmap, salmon, umi)
from bcbio.ngsalign import alignprep
from bcbio.pipeline import (archive, disambiguate, qcsummary, region, sample,
main, shared, variation, run_info, rnaseq)
from bcbio.qc import multiqc, qsignature
from bcbio.variation import (bamprep, bedutils, genotype, ensemble,
joint, multi, population, recalibrate, validate,
vcfutils)
@utils.map_wrap
def run_tagcount(*args):
return umi.tagcount(*args)
@utils.map_wrap
def run_filter_barcodes(*args):
return umi.filter_barcodes(*args)
@utils.map_wrap
def run_barcode_histogram(*args):
return umi.barcode_histogram(*args)
@utils.map_wrap
def run_umi_transform(*args):
return umi.umi_transform(*args)
@utils.map_wrap
def run_salmon_reads(*args):
return salmon.run_salmon_reads(*args)
@utils.map_wrap
def run_salmon_bam(*args):
return salmon.run_salmon_bam(*args)
@utils.map_wrap
def run_sailfish(*args):
return sailfish.run_sailfish(*args)
@utils.map_wrap
def run_rapmap_align(*args):
return rapmap.run_rapmap_align(*args)
@utils.map_wrap
def prepare_sample(*args):
return sample.prepare_sample(*args)
@utils.map_wrap
def prepare_bcbio_samples(*args):
return sample.prepare_bcbio_samples(*args)
@utils.map_wrap
def trim_sample(*args):
return sample.trim_sample(*args)
@utils.map_wrap
def trim_srna_sample(*args):
return srna.trim_srna_sample(*args)
@utils.map_wrap
def process_alignment(*args):
return sample.process_alignment(*args)
@utils.map_wrap
def postprocess_alignment(*args):
return sample.postprocess_alignment(*args)
@utils.map_wrap
def prep_samples(*args):
return sample.prep_samples(*args)
@utils.map_wrap
def srna_annotation(*args):
return srna.sample_annotation(*args)
@utils.map_wrap
def seqcluster_prepare(*args):
return seqcluster.run_prepare(*args)
@utils.map_wrap
def seqcluster_cluster(*args):
return seqcluster.run_cluster(*args)
@utils.map_wrap
def srna_alignment(*args):
return seqcluster.run_align(*args)
@utils.map_wrap
def peakcalling(*args):
return peaks.calling(*args)
@utils.map_wrap
def prep_align_inputs(*args):
return alignprep.create_inputs(*args)
@utils.map_wrap
def merge_sample(*args):
return sample.merge_sample(*args)
@utils.map_wrap
def delayed_bam_merge(*args):
return sample.delayed_bam_merge(*args)
@utils.map_wrap
def merge_split_alignments(*args):
return sample.merge_split_alignments(*args)
@utils.map_wrap
def piped_bamprep(*args):
return bamprep.piped_bamprep(*args)
@utils.map_wrap
def prep_recal(*args):
return recalibrate.prep_recal(*args)
@utils.map_wrap
def split_variants_by_sample(*args):
return multi.split_variants_by_sample(*args)
@utils.map_wrap
def postprocess_variants(*args):
return variation.postprocess_variants(*args)
@utils.map_wrap
def pipeline_summary(*args):
return qcsummary.pipeline_summary(*args)
@utils.map_wrap
def qsignature_summary(*args):
return qsignature.summary(*args)
@utils.map_wrap
def multiqc_summary(*args):
return multiqc.summary(*args)
@utils.map_wrap
def generate_transcript_counts(*args):
return rnaseq.generate_transcript_counts(*args)
@utils.map_wrap
def run_cufflinks(*args):
return rnaseq.run_cufflinks(*args)
@utils.map_wrap
def run_stringtie_expression(*args):
return rnaseq.run_stringtie_expression(*args)
@utils.map_wrap
def run_express(*args):
return rnaseq.run_express(*args)
@utils.map_wrap
def run_dexseq(*args):
return rnaseq.run_dexseq(*args)
@utils.map_wrap
def run_rnaseq_variant_calling(*args):
return rnaseq.run_rnaseq_variant_calling(*args)
@utils.map_wrap
def run_rnaseq_joint_genotyping(*args):
return rnaseq.run_rnaseq_joint_genotyping(*args)
@utils.map_wrap
def combine_bam(*args):
return shared.combine_bam(*args)
@utils.map_wrap
def batch_for_variantcall(*args):
return genotype.batch_for_variantcall(*args)
@utils.map_wrap
def variantcall_batch_region(*args):
return genotype.variantcall_batch_region(*args)
@utils.map_wrap
def concat_batch_variantcalls(*args):
return genotype.concat_batch_variantcalls(*args)
@utils.map_wrap
def get_parallel_regions(*args):
return region.get_parallel_regions(*args)
@utils.map_wrap
def variantcall_sample(*args):
return genotype.variantcall_sample(*args)
@utils.map_wrap
def combine_variant_files(*args):
return vcfutils.combine_variant_files(*args)
@utils.map_wrap
def concat_variant_files(*args):
return vcfutils.concat_variant_files(*args)
@utils.map_wrap
def merge_variant_files(*args):
return vcfutils.merge_variant_files(*args)
@utils.map_wrap
def call_hla(*args):
return hla.call_hla(*args)
@utils.map_wrap
def detect_sv(*args):
return structural.detect_sv(*args)
@utils.map_wrap
def validate_sv(*args):
return structural.validate_sv(*args)
@utils.map_wrap
def heterogeneity_estimate(*args):
return heterogeneity.estimate(*args)
@utils.map_wrap
def finalize_sv(*args):
return structural.finalize_sv(*args)
@utils.map_wrap
def combine_calls(*args):
return ensemble.combine_calls(*args)
@utils.map_wrap
def prep_gemini_db(*args):
return population.prep_gemini_db(*args)
@utils.map_wrap
def combine_bed(*args):
return bedutils.combine(*args)
@utils.map_wrap
def calc_callable_loci(*args):
return callable.calc_callable_loci(*args)
@utils.map_wrap
def combine_sample_regions(*args):
return callable.combine_sample_regions(*args)
@utils.map_wrap
def compare_to_rm(*args):
return validate.compare_to_rm(*args)
@utils.map_wrap
def run_disambiguate(*args):
return disambiguate.run(*args)
@utils.map_wrap
def disambiguate_split(*args):
return disambiguate.split(*args)
@utils.map_wrap
def disambiguate_merge_extras(*args):
return disambiguate.merge_extras(*args)
@utils.map_wrap
def clean_chipseq_alignment(*args):
return chipseq.clean_chipseq_alignment(*args)
@utils.map_wrap
def archive_to_cram(*args):
return archive.to_cram(*args)
@utils.map_wrap
def square_batch_region(*args):
return joint.square_batch_region(*args)
@utils.map_wrap
def cufflinks_assemble(*args):
return rnaseq.cufflinks_assemble(*args)
@utils.map_wrap
def cufflinks_merge(*args):
return rnaseq.cufflinks_merge(*args)
@utils.map_wrap
def stringtie_merge(*args):
return rnaseq.stringtie_merge(*args)
@utils.map_wrap
def organize_samples(*args):
return run_info.organize(*args)
@utils.map_wrap
def prep_system(*args):
return run_info.prep_system(*args)
@utils.map_wrap
def upload_samples(*args):
return upload.from_sample(*args)
@utils.map_wrap
def upload_samples_project(*args):
return upload.project_from_sample(*args)
@utils.map_wrap
def create_cwl(*args):
return cwl_create.from_world(*args)
@utils.map_wrap
def run_main(*args):
    # Unlike the other one-line wrappers in this module, this entry point
    # unpacks its positional arguments explicitly and forwards most of
    # them as keywords to the pipeline driver.
    work_dir, ready_config_file, systemconfig, fcdir, parallel, samples = args
    return main.run_main(work_dir, run_info_yaml=ready_config_file,
                         config_file=systemconfig, fc_dir=fcdir,
                         parallel=parallel, samples=samples)
| mit |
josn-jys/git-repo | subcmds/grep.py | 48 | 7883 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from color import Coloring
from command import PagedCommand
from git_command import git_require, GitCommand
class GrepColoring(Coloring):
    """Colour scheme for ``repo grep`` output (reads the 'grep' config section)."""
    def __init__(self, config):
        Coloring.__init__(self, config, 'grep')
        # 'project' printer renders the per-project header lines in bold.
        self.project = self.printer('project', attr='bold')
class Grep(PagedCommand):
common = True
helpSummary = "Print lines matching a pattern"
helpUsage = """
%prog {pattern | -e pattern} [<project>...]
"""
helpDescription = """
Search for the specified patterns in all project files.
Boolean Options
---------------
The following options can appear as often as necessary to express
the pattern to locate:
-e PATTERN
--and, --or, --not, -(, -)
Further, the -r/--revision option may be specified multiple times
in order to scan multiple trees. If the same file matches in more
than one tree, only the first result is reported, prefixed by the
revision name it was found under.
Examples
-------
Look for a line that has '#define' and either 'MAX_PATH or 'PATH_MAX':
repo grep -e '#define' --and -\( -e MAX_PATH -e PATH_MAX \)
Look for a line that has 'NODE' or 'Unexpected' in files that
contain a line that matches both expressions:
repo grep --all-match -e NODE -e Unexpected
"""
def _Options(self, p):
def carry(option,
opt_str,
value,
parser):
pt = getattr(parser.values, 'cmd_argv', None)
if pt is None:
pt = []
setattr(parser.values, 'cmd_argv', pt)
if opt_str == '-(':
pt.append('(')
elif opt_str == '-)':
pt.append(')')
else:
pt.append(opt_str)
if value is not None:
pt.append(value)
g = p.add_option_group('Sources')
g.add_option('--cached',
action='callback', callback=carry,
help='Search the index, instead of the work tree')
g.add_option('-r','--revision',
dest='revision', action='append', metavar='TREEish',
help='Search TREEish, instead of the work tree')
g = p.add_option_group('Pattern')
g.add_option('-e',
action='callback', callback=carry,
metavar='PATTERN', type='str',
help='Pattern to search for')
g.add_option('-i', '--ignore-case',
action='callback', callback=carry,
help='Ignore case differences')
g.add_option('-a','--text',
action='callback', callback=carry,
help="Process binary files as if they were text")
g.add_option('-I',
action='callback', callback=carry,
help="Don't match the pattern in binary files")
g.add_option('-w', '--word-regexp',
action='callback', callback=carry,
help='Match the pattern only at word boundaries')
g.add_option('-v', '--invert-match',
action='callback', callback=carry,
help='Select non-matching lines')
g.add_option('-G', '--basic-regexp',
action='callback', callback=carry,
help='Use POSIX basic regexp for patterns (default)')
g.add_option('-E', '--extended-regexp',
action='callback', callback=carry,
help='Use POSIX extended regexp for patterns')
g.add_option('-F', '--fixed-strings',
action='callback', callback=carry,
help='Use fixed strings (not regexp) for pattern')
g = p.add_option_group('Pattern Grouping')
g.add_option('--all-match',
action='callback', callback=carry,
help='Limit match to lines that have all patterns')
g.add_option('--and', '--or', '--not',
action='callback', callback=carry,
help='Boolean operators to combine patterns')
g.add_option('-(','-)',
action='callback', callback=carry,
help='Boolean operator grouping')
g = p.add_option_group('Output')
g.add_option('-n',
action='callback', callback=carry,
help='Prefix the line number to matching lines')
g.add_option('-C',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines around match')
g.add_option('-B',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines before match')
g.add_option('-A',
action='callback', callback=carry,
metavar='CONTEXT', type='str',
help='Show CONTEXT lines after match')
g.add_option('-l','--name-only','--files-with-matches',
action='callback', callback=carry,
help='Show only file names containing matching lines')
g.add_option('-L','--files-without-match',
action='callback', callback=carry,
help='Show only file names not containing matching lines')
def Execute(self, opt, args):
out = GrepColoring(self.manifest.manifestProject.config)
cmd_argv = ['grep']
if out.is_on and git_require((1,6,3)):
cmd_argv.append('--color')
cmd_argv.extend(getattr(opt,'cmd_argv',[]))
if '-e' not in cmd_argv:
if not args:
self.Usage()
cmd_argv.append('-e')
cmd_argv.append(args[0])
args = args[1:]
projects = self.GetProjects(args)
full_name = False
if len(projects) > 1:
cmd_argv.append('--full-name')
full_name = True
have_rev = False
if opt.revision:
if '--cached' in cmd_argv:
print >>sys.stderr,\
'fatal: cannot combine --cached and --revision'
sys.exit(1)
have_rev = True
cmd_argv.extend(opt.revision)
cmd_argv.append('--')
bad_rev = False
have_match = False
for project in projects:
p = GitCommand(project,
cmd_argv,
bare = False,
capture_stdout = True,
capture_stderr = True)
if p.Wait() != 0:
# no results
#
if p.stderr:
if have_rev and 'fatal: ambiguous argument' in p.stderr:
bad_rev = True
else:
out.project('--- project %s ---' % project.relpath)
out.nl()
out.write("%s", p.stderr)
out.nl()
continue
have_match = True
# We cut the last element, to avoid a blank line.
#
r = p.stdout.split('\n')
r = r[0:-1]
if have_rev and full_name:
for line in r:
rev, line = line.split(':', 1)
out.write("%s", rev)
out.write(':')
out.project(project.relpath)
out.write('/')
out.write("%s", line)
out.nl()
elif full_name:
for line in r:
out.project(project.relpath)
out.write('/')
out.write("%s", line)
out.nl()
else:
for line in r:
print line
if have_match:
sys.exit(0)
elif have_rev and bad_rev:
for r in opt.revision:
print >>sys.stderr, "error: can't search revision %s" % r
sys.exit(1)
else:
sys.exit(1)
| apache-2.0 |
akretion/knowledge | document_page/__openerp__.py | 2 | 1751 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Page',
'version': '1.0.1',
'category': 'Knowledge Management',
'description': """
Pages
=====
Web pages
""",
'author': ['OpenERP SA'],
'website': 'http://www.openerp.com/',
'license': 'AGPL-3',
'depends': [
'knowledge'
],
'data': [
'wizard/document_page_create_menu_view.xml',
'wizard/document_page_show_diff_view.xml',
'document_page_view.xml',
'security/document_page_security.xml',
'security/ir.model.access.csv',
],
'demo': [
'document_page_demo.xml'
],
'test': [
'test/document_page_test00.yml'
],
'installable': True,
'auto_install': False,
'images': [],
'css': ['static/src/css/document_page.css'],
}
| agpl-3.0 |
mlafeldt/google-python-class | basic/list2.py | 87 | 2160 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Return a list equal to *nums* with runs of adjacent equal elements
  collapsed to a single element, e.g. [1, 2, 2, 3] -> [1, 2, 3].

  The input list is not modified; a new list is returned.
  """
  result = []
  for num in nums:
    # Only keep the value when it differs from the last one kept.
    if not result or result[-1] != num:
      result.append(num)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in linear time.

  Classic two-pointer merge: a single pass over both inputs. On ties,
  elements from list1 are taken first, keeping the merge stable.
  The input lists are not modified.
  """
  merged = []
  i = j = 0
  while i < len(list1) and j < len(list2):
    if list2[j] < list1[i]:
      merged.append(list2[j])
      j += 1
    else:
      merged.append(list1[i])
      i += 1
  # At most one of these extends with anything: the leftover tail.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  # Prints ' OK ' when *got* equals *expected*, ' X ' otherwise, along with
  # both values for inspection. (Python 2 print statement -- this file
  # targets Python 2, see the #!/usr/bin/python -tt shebang.)
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  # Exercises remove_adjacent and linear_merge against known inputs,
  # printing an OK/X line per case via test().
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
main()
| apache-2.0 |
semonte/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/utils/ogrinfo.py | 389 | 1973 | """
This module includes some utility functions for inspecting the layout
of a GDAL data source -- the functionality is analogous to the output
produced by the `ogrinfo` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.geometries import GEO_CLASSES
def ogrinfo(data_source, num_features=10):
"""
Walks the available layers in the supplied `data_source`, displaying
the fields for the first `num_features` features.
"""
# Checking the parameters.
if isinstance(data_source, str):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise Exception('Data source parameter must be a string or a DataSource object.')
for i, layer in enumerate(data_source):
print "data source : %s" % data_source.name
print "==== layer %s" % i
print " shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__
print " # features: %s" % len(layer)
print " srs: %s" % layer.srs
extent_tup = layer.extent.tuple
print " extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4])
print "Displaying the first %s features ====" % num_features
width = max(*map(len,layer.fields))
fmt = " %%%ss: %%s" % width
for j, feature in enumerate(layer[:num_features]):
print "=== Feature %s" % j
for fld_name in layer.fields:
type_name = feature[fld_name].type_name
output = fmt % (fld_name, type_name)
val = feature.get(fld_name)
if val:
if isinstance(val, str):
val_fmt = ' ("%s")'
else:
val_fmt = ' (%s)'
output += val_fmt % val
else:
output += ' (None)'
print output
# For backwards compatibility.
sample = ogrinfo
| apache-2.0 |
calvingit21/h2o-2 | py/testdir_multi_jvm/test_exec2_dkv.py | 9 | 2706 | import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
def write_syn_dataset(csvPathname, rowCount, SEED):
    """Write a rowCount x 8 CSV of random integers to csvPathname.

    One independent generator per column, every one seeded with the same
    SEED (matching the original behaviour). Column 7 is always the
    constant 0; its generator is intentionally never advanced.
    """
    gens = [random.Random(SEED) for _ in range(8)]
    # (low, high) randint bounds for columns 0-5.
    bounds = [(0, 1), (0, 2), (-4, 4), (0, 8), (-16, 16), (-32, 32)]
    out = open(csvPathname, "w+")
    for _ in range(rowCount):
        cols = [gens[k].randint(lo, hi) for k, (lo, hi) in enumerate(bounds)]
        cols.append(0)                      # column 7: constant zero
        cols.append(gens[7].randint(0, 1))  # column 8
        out.write(",".join(str(c) for c in cols) + "\n")
    out.close()
zeroList = [
'Result0 = 0',
'Result.hex = 0',
]
exprList = [
'Result<n> = max(<keyX>[,<col1>])',
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(2,java_heap_GB=1)
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec2_dkv(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilenameAll = [
("syn_10x8.csv", 'cA', 15),
]
### csvFilenameList = random.sample(csvFilenameAll,1)
csvFilenameList = csvFilenameAll
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (csvFilename, hex_key, timeoutSecs) in csvFilenameList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random 10x8 csv"
write_syn_dataset(csvPathname, 10, SEEDPERFILE)
# creates csvFilename.hex from file in importFolder dir
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=2000)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
h2e.exec_zero_list(zeroList)
# does n+1 so use maxCol 6
h2e.exec_expr_list_rand(lenNodes, exprList, hex_key,
maxCol=6, maxRow=400000, maxTrials=100, timeoutSecs=timeoutSecs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
jimberlage/servo | tests/jquery/run_jquery.py | 215 | 9582 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import SocketServer
import threading
import urlparse
# List of jQuery modules that will be tested.
# TODO(gw): Disabled most of them as something has been
# introduced very recently that causes the resource task
# to panic - and hard fail doesn't exit the servo
# process when this happens.
# See https://github.com/servo/servo/issues/6210 and
# https://github.com/servo/servo/issues/6211
JQUERY_MODULES = [
# "ajax", # panics
# "attributes",
# "callbacks",
# "core", # mozjs crash
# "css",
# "data",
# "deferred",
# "dimensions",
# "effects",
# "event", # panics
# "manipulation", # mozjs crash
# "offset",
# "queue",
"selector",
# "serialize",
# "support",
# "traversing",
# "wrap"
]
# Port to run the HTTP server on for jQuery.
TEST_SERVER_PORT = 8192
# A regex for matching console.log output lines from the test runner.
# Raw string so the ``\[`` escapes reach the regex engine verbatim; a
# non-raw ``"\["`` is an invalid string escape and warns on Python 3.6+.
REGEX_PATTERN = r"^\[jQuery test\] \[([0-9]+)/([0-9]+)/([0-9]+)] (.*)"
# The result of a single test group.
class TestResult:
def __init__(self, success, fail, total, text):
self.success = int(success)
self.fail = int(fail)
self.total = int(total)
self.text = text
def __key(self):
return (self.success, self.fail, self.total, self.text)
def __eq__(self, other):
return self.__key() == other.__key()
def __ne__(self, other):
return self.__key() != other.__key()
def __hash__(self):
return hash(self.__key())
def __repr__(self):
return "ok={0} fail={1} total={2}".format(self.success, self.fail, self.total)
# Parse a line, producing a TestResult.
# Throws if unable to parse.
def parse_line_to_result(line):
    """Parse one runner output line into ``(test name, TestResult)``.

    Raises AttributeError when the line does not match REGEX_PATTERN
    (``re.match`` returns None and ``.groups()`` fails).
    """
    groups = re.match(REGEX_PATTERN, line).groups()
    ok_count, fail_count, total_count, name = groups
    return name, TestResult(ok_count, fail_count, total_count, line)
# Parse an entire buffer of lines to a dictionary
# of test results, keyed by the test name.
def parse_string_to_results(buffer):
test_results = {}
lines = buffer.splitlines()
for line in lines:
name, test_result = parse_line_to_result(line)
test_results[name] = test_result
return test_results
# Run servo and print / parse the results for a specific jQuery test module.
def run_servo(servo_exe, module):
url = "http://localhost:{0}/jquery/test/?module={1}".format(TEST_SERVER_PORT, module)
args = [servo_exe, url, "-z", "-f"]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if len(line) == 0:
break
line = line.rstrip()
try:
name, test_result = parse_line_to_result(line)
yield name, test_result
except AttributeError:
pass
# Build the filename for an expected results file.
def module_filename(module):
return 'expected_{0}.txt'.format(module)
# Read an existing set of expected results to compare against.
def read_existing_results(module):
with open(module_filename(module), 'r') as file:
buffer = file.read()
return parse_string_to_results(buffer)
# Write a set of results to file
def write_results(module, results):
with open(module_filename(module), 'w') as file:
for result in test_results.itervalues():
file.write(result.text + '\n')
# Print usage if command line args are incorrect
def print_usage():
print("USAGE: {0} test|update servo_binary jquery_base_dir".format(sys.argv[0]))
# Run a simple HTTP server to serve up the jQuery test suite.
# Never returns: serves requests forever on TEST_SERVER_PORT.
def run_http_server():
    class ThreadingSimpleServer(SocketServer.ThreadingMixIn,
                                BaseHTTPServer.HTTPServer):
        # Allow immediate restart of the test server without waiting for
        # the old listening socket to leave TIME_WAIT.
        allow_reuse_address = True

    class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        # TODO(gw): HACK copy the fixed version from python
        # main repo - due to https://bugs.python.org/issue23112
        def send_head(self):
            # Resolve the request path to a local filesystem path.
            path = self.translate_path(self.path)
            f = None
            if os.path.isdir(path):
                parts = urlparse.urlsplit(self.path)
                if not parts.path.endswith('/'):
                    # redirect browser - doing basically what apache does
                    self.send_response(301)
                    new_parts = (parts[0], parts[1], parts[2] + '/',
                                 parts[3], parts[4])
                    new_url = urlparse.urlunsplit(new_parts)
                    self.send_header("Location", new_url)
                    self.end_headers()
                    return None
                # Serve an index file if present; otherwise fall through to
                # a generated directory listing (for-else).
                for index in "index.html", "index.htm":
                    index = os.path.join(path, index)
                    if os.path.exists(index):
                        path = index
                        break
                else:
                    return self.list_directory(path)
            ctype = self.guess_type(path)
            try:
                # Always read in binary mode. Opening files in text mode may cause
                # newline translations, making the actual size of the content
                # transmitted *less* than the content-length!
                f = open(path, 'rb')
            except IOError:
                self.send_error(404, "File not found")
                return None
            try:
                self.send_response(200)
                self.send_header("Content-type", ctype)
                fs = os.fstat(f.fileno())
                # fs[6] == st_size.
                self.send_header("Content-Length", str(fs[6]))
                self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
                self.end_headers()
                # Caller is responsible for closing the returned file object.
                return f
            except:
                f.close()
                raise

        def log_message(self, format, *args):
            # Silence per-request logging noise from the test server.
            return

    server = ThreadingSimpleServer(('', TEST_SERVER_PORT), RequestHandler)
    while True:
        sys.stdout.flush()
        server.handle_request()
if __name__ == '__main__':
    # Usage: script.py test|update servo_binary jquery_base_dir
    if len(sys.argv) == 4:
        cmd = sys.argv[1]
        servo_exe = sys.argv[2]
        base_dir = sys.argv[3]
        # Serve the jQuery suite relative to the given base directory.
        os.chdir(base_dir)

        # Ensure servo binary can be found
        if not os.path.isfile(servo_exe):
            print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
            sys.exit(1)

        # Start the test server on a daemon thread so it dies with the process.
        httpd_thread = threading.Thread(target=run_http_server)
        httpd_thread.setDaemon(True)
        httpd_thread.start()

        if cmd == "test":
            print("Testing jQuery on Servo!")
            test_count = 0
            unexpected_count = 0
            individual_success = 0
            individual_total = 0

            # Test each module separately
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))
                prev_test_results = read_existing_results(module)
                for name, current_result in run_servo(servo_exe, module):
                    test_count += 1
                    individual_success += current_result.success
                    individual_total += current_result.total

                    # If this test was in the previous results, compare them.
                    if name in prev_test_results:
                        prev_result = prev_test_results[name]
                        if prev_result == current_result:
                            print("\t\tOK: {0}".format(name))
                        else:
                            unexpected_count += 1
                            print("\t\tFAIL: {0}: WAS {1} NOW {2}".format(name, prev_result, current_result))
                        # Remove matched entries so leftovers below are the
                        # tests that did not run this time.
                        del prev_test_results[name]
                    else:
                        # There was a new test that wasn't expected
                        unexpected_count += 1
                        print("\t\tNEW: {0}".format(current_result.text))

                # Check what's left over, these are tests that were expected but didn't run this time.
                for name in prev_test_results:
                    test_count += 1
                    unexpected_count += 1
                    print("\t\tMISSING: {0}".format(prev_test_results[name].text))

            print("\tRan {0} test groups. {1} unexpected results.".format(test_count, unexpected_count))
            # NOTE(review): this division raises ZeroDivisionError if no
            # individual tests ran (individual_total == 0) — confirm whether
            # that can happen in practice.
            print("\t{0} tests succeeded of {1} ({2:.2f}%)".format(individual_success,
                                                                   individual_total,
                                                                   100.0 * individual_success / individual_total))

            if unexpected_count > 0:
                sys.exit(1)
        elif cmd == "update":
            print("Updating jQuery expected results")
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))
                test_results = {}
                for name, test_result in run_servo(servo_exe, module):
                    print("\t\t{0} {1}".format(name, test_result))
                    test_results[name] = test_result
                write_results(module, test_results)
        else:
            print_usage()
    else:
        print_usage()
| mpl-2.0 |
epam-mooc/edx-platform | lms/djangoapps/courseware/tests/test_access.py | 16 | 9896 | import courseware.access as access
import datetime
import mock
from mock import Mock
from django.test import TestCase
from django.test.utils import override_settings
from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory, CourseEnrollmentAllowedFactory
from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE
import pytz
from opaque_keys.edx.locations import SlashSeparatedCourseKey
# pylint: disable=protected-access
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class AccessTestCase(TestCase):
    """
    Tests for the various access controls on the student dashboard
    """
    def setUp(self):
        # One shared toy course plus one user per role under test.
        course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        self.course = course_key.make_usage_key('course', course_key.run)
        self.anonymous_user = AnonymousUserFactory()
        self.student = UserFactory()
        self.global_staff = UserFactory(is_staff=True)
        self.course_staff = StaffFactory(course_key=self.course.course_key)
        self.course_instructor = InstructorFactory(course_key=self.course.course_key)

    def test_has_access_to_course(self):
        """Role/anonymous/None users get the expected course-level access."""
        # No user and anonymous users get nothing.
        self.assertFalse(access._has_access_to_course(
            None, 'staff', self.course.course_key
        ))
        self.assertFalse(access._has_access_to_course(
            self.anonymous_user, 'staff', self.course.course_key
        ))
        self.assertFalse(access._has_access_to_course(
            self.anonymous_user, 'instructor', self.course.course_key
        ))
        # Global (Django is_staff) users get both staff and instructor access.
        self.assertTrue(access._has_access_to_course(
            self.global_staff, 'staff', self.course.course_key
        ))
        self.assertTrue(access._has_access_to_course(
            self.global_staff, 'instructor', self.course.course_key
        ))
        # A user has staff access if they are in the staff group
        self.assertTrue(access._has_access_to_course(
            self.course_staff, 'staff', self.course.course_key
        ))
        self.assertFalse(access._has_access_to_course(
            self.course_staff, 'instructor', self.course.course_key
        ))
        # A user has staff and instructor access if they are in the instructor group
        self.assertTrue(access._has_access_to_course(
            self.course_instructor, 'staff', self.course.course_key
        ))
        self.assertTrue(access._has_access_to_course(
            self.course_instructor, 'instructor', self.course.course_key
        ))
        # A user does not have staff or instructor access if they are
        # not in either the staff or the the instructor group
        self.assertFalse(access._has_access_to_course(
            self.student, 'staff', self.course.course_key
        ))
        self.assertFalse(access._has_access_to_course(
            self.student, 'instructor', self.course.course_key
        ))

    def test__has_access_string(self):
        """'global' scope honours global staff; unknown actions raise ValueError."""
        user = Mock(is_staff=True)
        self.assertFalse(access._has_access_string(user, 'staff', 'not_global', self.course.course_key))

        user._has_global_staff_access.return_value = True
        self.assertTrue(access._has_access_string(user, 'staff', 'global', self.course.course_key))

        self.assertRaises(ValueError, access._has_access_string, user, 'not_staff', 'global', self.course.course_key)

    def test__has_access_descriptor(self):
        """Descriptor access passes for load/instructor; unknown actions raise."""
        # TODO: override DISABLE_START_DATES and test the start date branch of the method
        user = Mock()
        date = Mock()
        date.start = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)  # make sure the start time is in the past

        # Always returns true because DISABLE_START_DATES is set in test.py
        self.assertTrue(access._has_access_descriptor(user, 'load', date))
        self.assertTrue(access._has_access_descriptor(user, 'instructor', date))
        with self.assertRaises(ValueError):
            access._has_access_descriptor(user, 'not_load_or_staff', date)

    @mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test__has_access_descriptor_staff_lock(self):
        """
        Tests that "visible_to_staff_only" overrides start date.
        """
        mock_unit = Mock()
        mock_unit._class_tags = {}  # Needed for detached check in _has_access_descriptor

        def verify_access(student_should_have_access):
            """ Verify the expected result from _has_access_descriptor """
            self.assertEqual(student_should_have_access, access._has_access_descriptor(
                self.anonymous_user, 'load', mock_unit, course_key=self.course.course_key)
            )

        # staff always has access
        self.assertTrue(access._has_access_descriptor(
            self.course_staff, 'load', mock_unit, course_key=self.course.course_key)
        )

        # No start date, staff lock on
        mock_unit.visible_to_staff_only = True
        verify_access(False)

        # No start date, staff lock off.
        mock_unit.visible_to_staff_only = False
        verify_access(True)

        # Start date in the past, staff lock on.
        mock_unit.start = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)
        mock_unit.visible_to_staff_only = True
        verify_access(False)

        # Start date in the past, staff lock off.
        mock_unit.visible_to_staff_only = False
        verify_access(True)

        # Start date in the future, staff lock on.
        mock_unit.start = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1)  # release date in the future
        mock_unit.visible_to_staff_only = True
        verify_access(False)

        # Start date in the future, staff lock off.
        mock_unit.visible_to_staff_only = False
        verify_access(False)

    def test__has_access_course_desc_can_enroll(self):
        """Enrollment access across enrollment windows / invitations / roles."""
        yesterday = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1)
        tomorrow = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1)

        # Non-staff can enroll if authenticated and specifically allowed for that course
        # even outside the open enrollment period
        user = UserFactory.create()
        course = Mock(
            enrollment_start=tomorrow, enrollment_end=tomorrow,
            id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain=''
        )
        CourseEnrollmentAllowedFactory(email=user.email, course_id=course.id)
        self.assertTrue(access._has_access_course_desc(user, 'enroll', course))

        # Staff can always enroll even outside the open enrollment period
        user = StaffFactory.create(course_key=course.id)
        self.assertTrue(access._has_access_course_desc(user, 'enroll', course))

        # Non-staff cannot enroll if it is between the start and end dates and invitation only
        # and not specifically allowed
        course = Mock(
            enrollment_start=yesterday, enrollment_end=tomorrow,
            id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
            invitation_only=True
        )
        user = UserFactory.create()
        self.assertFalse(access._has_access_course_desc(user, 'enroll', course))

        # Non-staff can enroll if it is between the start and end dates and not invitation only
        course = Mock(
            enrollment_start=yesterday, enrollment_end=tomorrow,
            id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
            invitation_only=False
        )
        self.assertTrue(access._has_access_course_desc(user, 'enroll', course))

        # Non-staff cannot enroll outside the open enrollment period if not specifically allowed
        course = Mock(
            enrollment_start=tomorrow, enrollment_end=tomorrow,
            id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='',
            invitation_only=False
        )
        self.assertFalse(access._has_access_course_desc(user, 'enroll', course))

    def test__user_passed_as_none(self):
        """Ensure has_access handles a user being passed as null"""
        access.has_access(None, 'staff', 'global', None)
class UserRoleTestCase(TestCase):
    """
    Tests for user roles.
    """
    def setUp(self):
        # One user per role for the toy course.
        self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        self.anonymous_user = AnonymousUserFactory()
        self.student = UserFactory()
        self.global_staff = UserFactory(is_staff=True)
        self.course_staff = StaffFactory(course_key=self.course_key)
        self.course_instructor = InstructorFactory(course_key=self.course_key)

    def test_user_role_staff(self):
        """Ensure that user role is student for staff masqueraded as student."""
        self.assertEqual(
            'staff',
            access.get_user_role(self.course_staff, self.course_key)
        )
        # Masquerade staff
        self.course_staff.masquerade_as_student = True
        self.assertEqual(
            'student',
            access.get_user_role(self.course_staff, self.course_key)
        )

    def test_user_role_instructor(self):
        """Ensure that user role is student for instructor masqueraded as student."""
        self.assertEqual(
            'instructor',
            access.get_user_role(self.course_instructor, self.course_key)
        )
        # Masquerade instructor
        self.course_instructor.masquerade_as_student = True
        self.assertEqual(
            'student',
            access.get_user_role(self.course_instructor, self.course_key)
        )

    def test_user_role_anonymous(self):
        """Ensure that user role is student for anonymous user."""
        self.assertEqual(
            'student',
            access.get_user_role(self.anonymous_user, self.course_key)
        )
| agpl-3.0 |
WickedShell/ardupilot | mk/VRBRAIN/Tools/genmsg/test/test_genmsg_command_line.py | 51 | 1974 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
    """Unit-test includepath_to_dict's 'package:path' parsing."""
    from genmsg.command_line import includepath_to_dict

    # Empty input yields an empty mapping.
    assert includepath_to_dict([]) == {}
    # A single "pkg:path" entry maps the package to a one-element path list.
    assert includepath_to_dict(['std_msgs:foo']) == {'std_msgs': ['foo']}
    # Only the first colon splits; later colons stay in the path component.
    expected = {'std_msgs': ['foo'], 'bar_msgs': ['baz:colon']}
    assert includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon']) == expected
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/Backup Pools/Video_only_Test_Run/4_Video.py | 2 | 24087 | from ImageScripter import *
from elan import *
from pytank.Core.SystemFunctions import Say
def Check_If_Blue_Screen_Exists():
    """Poll twice for the blue-screen image; raise if it is seen both times.

    Raises:
        ValueError: when the blue screen is detected on both polls
            ("Blue Screen Exception"), matching the original behaviour.

    Fixes over the original: `Exists()` is called once per iteration instead
    of twice (`if ... == True` / `elif ... == False` could skip both branches
    if the two calls disagreed), the `== True` / `== False` comparisons are
    replaced with idiomatic truth tests, and the dead `else: pass` is gone.
    """
    Say("Checking if Blue Screen Exists")
    hits = 0
    for _ in range(2):
        if Android.bluescreenlarge.Exists():
            Say("BlueScreen exists")
            hits += 1
            # Give the screen a moment before the second poll.
            sleep(1)
        else:
            Say("Blue Screen Does Not Exist")
    # Two consecutive sightings == genuine blue screen.
    if hits >= 2:
        raise ValueError("Blue Screen Exception")
# --- Test fixture configuration -------------------------------------------
# NAS share used as the built-in DVR recording target.
dvr_path = "//192.168.0.194/files/DVR"
# ONVIF endpoint used by both "static" camera entries.
static_camera_url = "http://192.168.0.103/onvif/device_service"
# MJPEG URL used by the "generic URL" camera entries.
camera_url_generic = "http://192.168.0.195/nphMotionJpeg?Resolution=320x240&Quality=Standard"
# Value typed into the camera address field for the Panasonic entries.
camera_ip = "195"
cameraType = "Panasonic Standard Series"
generic_user = 'Administrator'
# NOTE(review): name is misspelled ("Pasword") but referenced throughout —
# keep as-is.
generic_Pasword = 'password'
#############
############# Configure the built-in DVR to record onto the NAS share.
Configurator.Start()
Configurator.video.Click()
Configurator.builtindvroptions.Click()
Configurator.Edit.SetText(0,dvr_path)
Configurator.Edit.SetText(6,"50000")
Configurator.apply.Click()
Configurator.custompages.Click()
Configurator.builtindvroptions.Click()
# Confirm the storage size took effect.
Configurator.storagesizesetto5000.Wait()
# --- Add the ONVIF (dynamic/static) and generic-URL camera sources --------
# NOTE(review): the file's original indentation was lost; the try/except
# grouping below was reconstructed (retry right-click as a top-level
# statement, matching the explicitly-wrapped variants later in the file) —
# verify against the original script.
#################################################### ADD Dynamic
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    # Dialog did not appear; give the UI time before the retry click below.
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Dynamic')
AddNewDevice.ListView.Select(1,"ONVIF Video Source (Dynamic)")
AddNewDevice.Wait()
sleep(3)
AddNewDevice.Edit.SetText(1,'admin')
AddNewDevice.Edit.SetText(2,'admin')
AddNewDevice.PushButton.Click("Search For Devices")
Add.devicetype.Wait()
Add.devicetype.RealClick()
sleep(5)
Add.ListView.Select(0,"Linear")
Add.PushButton.Click("OK")
#################################### Add Static 1
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Static One')
AddNewDevice.ListView.Select(1,"ONVIF Video Source (Static)")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.Edit.SetText(6,"admin")
Configurator.Edit.SetText(7,"admin")
Configurator.Edit.SetText(5,static_camera_url)
sleep(2)
Configurator.apply.Click()
#################################### Add Static 2
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Static Two')
AddNewDevice.ListView.Select(1,"ONVIF Video Source (Static)")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.Edit.SetText(6,"admin")
Configurator.Edit.SetText(7,"admin")
Configurator.Edit.SetText(5,static_camera_url)
sleep(2)
Configurator.apply.Click()
#################################### Add Generic 1
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Generic One')
AddNewDevice.ListView.Select(1,"Generic URL Video Camera")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
sleep(5)
Configurator.Edit.SetText(5,camera_url_generic)
Configurator.Edit.SetText(6,camera_ip)
Configurator.Edit.SetText(11,generic_user)
Configurator.Edit.SetText(12,generic_Pasword)
sleep(2)
Configurator.apply.Click()
#################################### Add Generic 2
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Generic Two')
AddNewDevice.ListView.Select(1,"Generic URL Video Camera")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
sleep(3)
Configurator.Edit.SetText(5,camera_url_generic)
Configurator.Edit.SetText(6,camera_ip)
Configurator.Edit.SetText(11,generic_user)
Configurator.Edit.SetText(12,generic_Pasword)
sleep(2)
Configurator.apply.Click()
############################### Start loop #################
# --- Add eleven Panasonic camera variants, one per feature under test -----
# NOTE(review): indentation was lost in this file; try/except grouping and
# loop bodies below are reconstructed — verify against the original script.
#################################### Add Panasonic one: PPlain (no extras)
NameVar = "PPlain"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Combo 6/7 = full-screen / resolution controls; 1 = disabled.
Configurator.ComboBox.Select(6,1)
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic two: PZoom
NameVar = "PZoom"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
############################################
try:
    Configurator.videocamerassources.RightClickType('a')
except:
    pass
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Enable zoom control.
Configurator.ComboBox.Select(1,1)
#Turn off Full Screen, Resolution
Configurator.ComboBox.Select(6,1)
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic three: PPresets
NameVar = "PPresets"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Enable preset control.
Configurator.ComboBox.Select(2,1)
#Turn off Full Screen, Resolution
Configurator.ComboBox.Select(6,1)
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic four: PAudio
NameVar = "PAudio"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Enable audio control.
Configurator.ComboBox.Select(3,1)
#Turn off Full Screen, Resolution
Configurator.ComboBox.Select(6,1)
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic five: PFullScreen
NameVar = "PFullScreen"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Full screen enabled (0), resolution disabled.
Configurator.ComboBox.Select(6,0)
#Turn off Resolution
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic six: PResolution
NameVar = "PResolution"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(10)
# NOTE(review): loop extent is reconstructed — assuming the whole
# configure/apply sequence is retried 10 times; confirm the intent.
for i in range(10):
    Configurator.Edit.SetText(5,camera_ip)
    # Resolution enabled (0).
    Configurator.ComboBox.Select(7,0)
    #Turn off Full Screen
    Configurator.ComboBox.Select(6,1)
    sleep(2)
    Configurator.apply.Click()
#################################### Add Panasonic seven: PAll (everything on)
NameVar = "PAll"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Enable every feature combo, plus full screen and resolution (0).
Configurator.ComboBox.Select(0,1)
Configurator.ComboBox.Select(1,1)
Configurator.ComboBox.Select(2,1)
Configurator.ComboBox.Select(3,1)
Configurator.ComboBox.Select(4,1)
Configurator.ComboBox.Select(10,1)
Configurator.ComboBox.Select(11,1)
Configurator.ComboBox.Select(6,0)
Configurator.ComboBox.Select(7,0)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic eight: PLow (low resolution)
NameVar = "PLow"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Resolution control on, default resolution index 0 (low).
Configurator.ComboBox.Select(7,0)
Configurator.ComboBox.Select(8,0)
#Turn off Full Screen
Configurator.ComboBox.Select(6,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic nine: PMed (medium resolution)
NameVar = "PMed"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Resolution control on, default resolution index 1 (medium).
Configurator.ComboBox.Select(7,0)
Configurator.ComboBox.Select(8,1)
#Turn off Full Screen
Configurator.ComboBox.Select(6,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic ten: PHigh (high resolution)
NameVar = "PHigh"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
try:
    Configurator.videocamerassources.RightClickType('a')
except ValueError:
    pass
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
# Resolution control on, default resolution index 2 (high).
Configurator.ComboBox.Select(7,0)
Configurator.ComboBox.Select(8,2)
#Turn off Full Screen
Configurator.ComboBox.Select(6,1)
sleep(2)
Configurator.apply.Click()
#################################### Add Panasonic eleven: PDVR (recording)
NameVar = "PDVR"
Configurator.videocamerassources.RightClickType('a')
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type(NameVar)
AddNewDevice.ListView.Select(1,cameraType)
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
Configurator.name.Wait()
sleep(3)
Configurator.Edit.SetText(5,camera_ip)
Configurator.ComboBox.Select(4,1)
Configurator.ComboBox.Select(9,1)
#Res high
Configurator.ComboBox.Select(12,2)
sleep(1)
# Selected twice in the original, presumably to make the choice stick.
Configurator.ComboBox.Select(12,2)
#Turn off Full Screen, Resolution
Configurator.ComboBox.Select(6,1)
Configurator.ComboBox.Select(7,1)
sleep(2)
Configurator.apply.Click()
############################################################# quad setup
# --- Build the "FourCam" custom page with four video stream tiles ---------
# NOTE(review): indentation reconstructed — verify try/except grouping.
Configurator.custompages.RightClickType('a')
Add.Edit.SetText(0,'FourCam')
Add.PushButton.Click("OK")
Configurator.system.Click()
Configurator.video.Click()
sleep(3)
Configurator.fourcam.Max()
Configurator.largeformat.Click()
# One video-stream tile per quadrant.
Configurator.topleft.RightClickType('a')
Add.Edit.SetText(0,'VidONE')
Add.ListBox.Select(0,'Video Stream')
Add.PushButton.Click('OK')
Configurator.topright.RightClickType('a')
Add.Edit.SetText(0,'VidTWO')
Add.ListBox.Select(0,'Video Stream')
Add.PushButton.Click('OK')
Configurator.bottomleft.RightClickType('a')
Add.Edit.SetText(0,'VidTHREE')
Add.ListBox.Select(0,'Video Stream')
Add.PushButton.Click('OK')
Configurator.bottomright.RightClickType('a')
Add.Edit.SetText(0,'VidFOUR')
Add.ListBox.Select(0,'Video Stream')
Add.PushButton.Click('OK')
Configurator.apply.Click()
sleep(4)
############################################### VIDONE -> Dynamic camera
Configurator.vidone.RightClickType("s")
sleep(2)
#print(VideoStream.ComboBox.GetSelection(2))
try:
    VideoStream.ComboBox.Select(2,'Dynamic')
except:
    # Dialog focus was lost; click elsewhere and reopen the stream editor.
    Configurator.vidfour.Click()
    Configurator.vidone.RightClickType("s")
    sleep(2)
    VideoStream.ComboBox.Select(2,'Dynamic')
VideoStream.ComboBox.Select(3,'FS Full')
VideoStream.Close()
Configurator.apply.Click()
############################################### VIDTWO -> Generic One
Configurator.vidtwo.RightClickType("s")
sleep(2)
#print(VideoStream.ComboBox.GetSelection(2))
VideoStream.ComboBox.Select(2,'Generic One')
VideoStream.Close()
Configurator.apply.Click()
############################################### VIDTHREE -> Generic Two
Configurator.vidthree.RightClickType("s")
sleep(2)
#print(VideoStream.ComboBox.GetSelection(2))
VideoStream.ComboBox.Select(2,'Generic Two')
VideoStream.Close()
Configurator.apply.Click()
############################################### VIDFOUR -> PAll
Configurator.vidfour.RightClickType("s")
sleep(2)
#print(VideoStream.ComboBox.GetSelection(2))
VideoStream.ComboBox.Select(2,'PAll')
VideoStream.Close()
Configurator.apply.Click()
########################################################### Check DVR status
sleep(3)
Configurator.pdvrdevice.RightClickType('S')
sleep(2)
Status.bytesout.Wait()
sleep(2)
# NOTE(review): `Exists` is not called here — comparing the bound method to
# True is always False, so the raise branch can never fire; the original
# almost certainly intended `Status.motiona.Exists() == True`. Preserved
# as-is (doc-only change); confirm and fix separately.
if Status.motiona.Exists == True:
    raise ValueError("Motion should not be N.A.")
else:
    Status.Close()
Configurator.CloseAndClean()
# --- Walk each camera page in the Android viewer and exercise controls ----
# NOTE(review): indentation reconstructed — verify try/except and loop
# grouping against the original script.
if Android.shuddar.Exists():
    Android.shuddar.Click()
else:
    print('shudder doesnt exist')
Android.video.Click()
Android.videocameras.Click(xoffset = -500,yoffset = 200)
############################################ Dynamic
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ Static 1
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ Static 2
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ Generic 1
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ Generic 2
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ PPlain: no controls expected
Check_If_Blue_Screen_Exists()
Android.nocamcontrols.Wait()
Android.gonextarrow.Click()
############################################ PZoom: exercise zoom in/out
Check_If_Blue_Screen_Exists()
for i in range(5):
    Android.zoomincam.Click()
sleep(3)
for i in range(5):
    Android.zoomoutcam.Click()
Check_If_Blue_Screen_Exists()
Android.gonextarrow.Click()
############################################ PPresets: select preset 7
Check_If_Blue_Screen_Exists()
Android.preset.Click()
sleep(3)
Android.preset7.Click()
Android.gonextarrow.Click()
############################################ PAudio: toggle audio
Check_If_Blue_Screen_Exists()
Android.audioon.Click()
Android.audiooff.Click()
Android.gonextarrow.Click()
############################################ PFullScreen
Check_If_Blue_Screen_Exists()
Android.fullscreen.Wait()
#################################### Go into full screen, tap to exit
Android.fullscreen.Click()
sleep(3)
Android.ClickWithXY(400,300)
sleep(3)
Android.gonextarrow.Click()
############################################ PResolution: cycle high/med/low
Check_If_Blue_Screen_Exists()
###### Check Res
Android.highrez.Click()
Android.mediumrezmenu.Click()
Android.medrez.Click()
Android.lowrezmenu.Click()
Android.lowrez.Click()
Android.highrezinmenu.Click()
#Android.highres.Click()
#Android.mediuminner.Click()
#Android.medres.Click()
#Android.highinner.Click()
#Android.highres.Wait()
Android.gonextarrow.Click()
############################################ PAll: all controls present
Check_If_Blue_Screen_Exists()
Android.allvidcontrols.Wait()
###### Check Full
Android.fullscreen.Click()
sleep(3)
Android.ClickWithXY(400,300)
sleep(3)
###### Check Res
Android.highrez.Click()
Android.mediumrezmenu.Click()
Android.medrez.Click()
Android.lowrezmenu.Click()
Android.lowrez.Click()
Android.highrezinmenu.Click()
Android.allvidcontrols.Wait()
Android.gonextarrow.Click()
############################################ PLow: default res is low
Check_If_Blue_Screen_Exists()
Android.resisonlow.Wait()
Android.gonextarrow.Click()
############################################ PMed: default res is medium
Check_If_Blue_Screen_Exists()
Android.resisonmed.Wait()
Android.gonextarrow.Click()
############################################ PHigh: default res is high
Check_If_Blue_Screen_Exists()
Android.resisonhigh.Wait()
Android.gonextarrow.Click()
############################################ PDVR: exercise DVR transport
Check_If_Blue_Screen_Exists()
Android.dvrlogo.Click()
Android.dvrblockback.ClickAndRepeat(7)
Android.playvideo.Click()
####################
Android.alldvrcontrols.Wait()
############# Minus
try:
    Android.dvrminus.Click()
except:
    sleep(4)
################# Remove when timeline fixed
try:
    Android.ptime.Click()
    Android.dvrminus.Click()
except:
    print('Faled TimeLine')
Android.dvrforward.Click()
Android.dvrminus.Click()
Android.dvrforward.Click()
############# Plus
try:
    Android.dvrplus.Click()
except:
    # Plus not reachable; jump back and retry.
    sleep(2)
    Android.dvrbigback.Click()
    sleep(2)
    Android.dvrplus.Click()
############# Middle
Android.dvrmiddle.Click()
############# Other transport buttons
Android.dvrback.Click()
Android.dvrforward.Click()
Android.dvrpause.Click()
Android.skipbackdvr.Click()
Android.skipforward.Click()
Android.dvrforwardback.Click()
Android.dvrblockback.Click()
Android.dvrblockforward.Click()
############################################## DVR END
Android.gonextarrow3.Click()
################################################## Quad View
######################################################## Navigate right
Android.shuddar.Click()
try:
    Android.homeicon.Click()
except:
    # Home icon hidden; re-open the shutter and retry.
    sleep(3)
    Android.shuddar.Click()
    Android.homeicon.Click()
Android.shuddar.Click()
Android.video.Click()
Android.videocameras.Click(xoffset = -500,yoffset = 200)
################################################## Page forward/back twice
for t in range(2):
    ######################################################## Right x16
    for i in range(16):
        Android.gonextarrow2.Click()
    ######################################################## Left x16
    for i in range(16):
        Android.gobacktarrow2.Click()
for i in range(16):
    Android.gonextarrow2.Click()
Check_If_Blue_Screen_Exists()
############################################### Quad Cam
#BUG
#Android.quadfour.ClickAndRepeat(amount_of_times = 5,yoffset = 150)
#Android.ClickWithXY(400,200)
####################################Add Relay Generic 3
# Create a "Generic URL Video Camera" source named 'Generic Relay Three'
# in the Configurator and attach Momentary/Toggle relay controls to it.
Configurator.Start()
Configurator.video.Click()
Configurator.videocamerassources.RightClickType('a')
# The "Add New Device" dialog can be slow to appear; retry the
# right-click shortcut once on timeout.
try:
    AddNewDevice.Wait()
except:
    sleep(3)
    #theObject.RealClick()
    Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Generic Relay Three')
AddNewDevice.ListView.Select(1,"Generic URL Video Camera")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
sleep(3)
# Fill in the camera connection fields (URL, IP, credentials) by edit
# box index.
Configurator.Edit.SetText(5,camera_url_generic)
Configurator.Edit.SetText(6,camera_ip)
Configurator.Edit.SetText(11,generic_user)
Configurator.Edit.SetText(12,generic_Pasword)
sleep(2)
Configurator.apply.Click()
Configurator.genericrelaythreehighlighted.Max()
Configurator.cameraoptions.Click(threshold=.92)
sleep(3)
# Configure two relay controls on the camera: 'Momentary' and 'Toggle'.
Configurator.Edit.SetText(0,'Momentary')
Configurator.ComboBox.Select(0,2)
Configurator.ComboBox.Select(1,0)
Configurator.Edit.SetText(1,'Toggle')
Configurator.ComboBox.Select(2,3)
Configurator.ComboBox.Select(3,1)
Configurator.ComboBox.Select(3,1)
Configurator.apply.Click()
######################################Check Relay in Viewer
# Back in the Android viewer: confirm both relay buttons respond and the
# toggle shows its activated state.
Configurator.CloseAndClean()
Android.shuddar.Click()
Android.fourcamallview.Click(xoffset=400,yoffset = 30)
Check_If_Blue_Screen_Exists()
Android.fullscreenmomentarytoggle.Wait()
sleep(3)
Android.momentaryvid.Click()
sleep(3)
Android.togglevid.Click()
Check_If_Blue_Screen_Exists()
Android.toggleactivated.Wait()
###############################Add Relay to All#################################
# Apply the same Momentary/Toggle relay configuration to the "all
# sources" node, then verify from the Android viewer.
Configurator.Start()
Configurator.video.Click()
sleep(4)
Configurator.pallsource.Click()
Configurator.pallsourcehigh.Max()
Configurator.cameraoptions.Click(threshold=.92)
sleep(3)
Configurator.Edit.SetText(0,'Momentary')
Configurator.ComboBox.Select(0,2)
Configurator.ComboBox.Select(1,0)
Configurator.Edit.SetText(1,'Toggle')
Configurator.ComboBox.Select(2,3)
Configurator.ComboBox.Select(3,1)
Configurator.ComboBox.Select(3,1)
Configurator.apply.Click()
######################################Check All Relay in Viewer
Configurator.CloseAndClean()
# Navigate back through the app to reach the camera view again.
Android.gobackarrow.Click()
Android.gobackarrow.Click()
Android.gobackarrow.Click()
Android.gobackarrow.Click()
Android.gobackarrow.Click()
Android.gobackarrow.Click()
Check_If_Blue_Screen_Exists()
sleep(3)
Android.momentaryvid.Click()
sleep(3)
Android.togglevid.Click()
Check_If_Blue_Screen_Exists()
######################################All Zoom in
# Exercise digital zoom in both directions.
Android.zoomin2.ClickAndRepeat(10)
Android.zoomout2.ClickAndRepeat(10)
Check_If_Blue_Screen_Exists()
###############Delete Generic Devices Crash
# The block below reproduces a crash when deleting a generic device; it
# is disabled (kept as a bare string literal) until the bug is fixed.
'''
Configurator.Start()
Configurator.video.Click()
Configurator.videocamerassources.RightClickType('a')
try:
AddNewDevice.Wait()
except:
sleep(3)
#theObject.RealClick()
Configurator.videocamerassources.RightClickType('a')
AddNewDevice.name.Wait()
sleep(1)
AddNewDevice.Type('Generic Crash')
AddNewDevice.ListView.Select(1,"Generic URL Video Camera")
AddNewDevice.Wait()
sleep(3)
Add.PushButton.Click("OK")
sleep(3)
Configurator.Edit.SetText(5,camera_url_generic)
sleep(2)
Configurator.apply.Click()
Configurator.genericcrashhighlighted.RightClickType('d')
HlConfig.PushButton.Click('Yes')
try:
HlConfig.Close()
except:
pass
'''
############### End Delete Generic Devices Crash
################################################Delete All Cameras
# Delete every remaining camera source — first all 'pcam' entries, then
# all 'gcam' entries — confirming each deletion dialog, until none are
# left.
Configurator.Start()
Configurator.video.Click()
sleep(5)
while True:
    if Configurator.pcam.Exists():
        Configurator.pcam.RightClickType('d')
        sleep(2)
        HlConfig.PushButton.Click('Yes')
        try:
            HlConfig.Close()
        except:
            pass
    else:
        print('doesnt exists')
        break
while True:
    if Configurator.gcam.Exists():
        Configurator.gcam.RightClickType('d')
        sleep(2)
        HlConfig.PushButton.Click('Yes')
        try:
            HlConfig.Close()
        except:
            pass
    else:
        print('doesnt exists')
        break
Configurator.CloseAndClean()
##########################################################################
# Restore the Configurator to a clean state for the next run.
Configurator.Reset()
print('###################Finished########################')
| gpl-3.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/twisted/internet/_sslverify.py | 20 | 24219 | # -*- test-case-name: twisted.test.test_sslverify -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# Copyright (c) 2005-2008 Twisted Matrix Laboratories.
import itertools
from OpenSSL import SSL, crypto
from twisted.python import reflect, util
from twisted.python.hashlib import md5
from twisted.internet.defer import Deferred
from twisted.internet.error import VerifyError, CertificateError
# Private - shared between all OpenSSLCertificateOptions, counts up to provide
# a unique session id for each context
# (Python 2 idiom: bind the counter's .next method so each _sessionCounter()
# call returns the next integer.)
_sessionCounter = itertools.count().next
# Map of X.509 name fields: both the short (e.g. 'CN') and long
# (e.g. 'commonName') spellings map to the canonical long-form attribute
# name understood by pyOpenSSL's X509Name objects.
_x509names = {
    'CN': 'commonName',
    'commonName': 'commonName',
    'O': 'organizationName',
    'organizationName': 'organizationName',
    'OU': 'organizationalUnitName',
    'organizationalUnitName': 'organizationalUnitName',
    'L': 'localityName',
    'localityName': 'localityName',
    'ST': 'stateOrProvinceName',
    'stateOrProvinceName': 'stateOrProvinceName',
    'C': 'countryName',
    'countryName': 'countryName',
    'emailAddress': 'emailAddress'}
class DistinguishedName(dict):
    """
    Identify and describe an entity.
    Distinguished names are used to provide a minimal amount of identifying
    information about a certificate issuer or subject. They are commonly
    created with one or more of the following fields::
        commonName (CN)
        organizationName (O)
        organizationalUnitName (OU)
        localityName (L)
        stateOrProvinceName (ST)
        countryName (C)
        emailAddress
    """
    # No per-instance __dict__: all state lives in the dict itself, and
    # attribute access is routed through __getattr__/__setattr__ below.
    __slots__ = ()
    def __init__(self, **kw):
        for k, v in kw.iteritems():
            setattr(self, k, v)
    def _copyFrom(self, x509name):
        # Copy every recognized X.509 field present on the given pyOpenSSL
        # X509Name into this mapping, canonicalising names via __setattr__.
        # (An unused scratch dict the previous version created here has
        # been removed.)
        for name in _x509names:
            value = getattr(x509name, name, None)
            if value is not None:
                setattr(self, name, value)
    def _copyInto(self, x509name):
        # Mirror our fields onto a pyOpenSSL X509Name object.
        for k, v in self.iteritems():
            setattr(x509name, k, v)
    def __repr__(self):
        return '<DN %s>' % (dict.__repr__(self)[1:-1])
    def __getattr__(self, attr):
        # Allow dn.commonName / dn.CN style access; unknown field names
        # raise AttributeError (not KeyError) to keep attribute semantics.
        try:
            return self[_x509names[attr]]
        except KeyError:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        assert type(attr) is str
        if not attr in _x509names:
            raise AttributeError("%s is not a valid OpenSSL X509 name field" % (attr,))
        realAttr = _x509names[attr]
        # X.509 name values must be ASCII; store under the canonical
        # long-form field name.
        value = value.encode('ascii')
        assert type(value) is str
        self[realAttr] = value
    def inspect(self):
        """
        Return a multi-line, human-readable representation of this DN.
        """
        l = []
        lablen = 0
        def uniqueValues(mapping):
            return dict.fromkeys(mapping.itervalues()).keys()
        for k in uniqueValues(_x509names):
            label = util.nameToLabel(k)
            lablen = max(len(label), lablen)
            v = getattr(self, k, None)
            if v is not None:
                l.append((label, v))
        lablen += 2
        for n, (label, attr) in enumerate(l):
            l[n] = (label.rjust(lablen)+': '+ attr)
        return '\n'.join(l)
# Short alias for DistinguishedName, matching common X.509 terminology.
DN = DistinguishedName
class CertBase:
    """
    Common base for wrappers around pyOpenSSL X.509 objects (certificates
    and certificate requests), stored as C{self.original}.
    """
    def __init__(self, original):
        self.original = original

    def _copyName(self, suffix):
        """
        Copy the X509Name reached via C{get_<suffix>()} on the wrapped
        object into a fresh L{DistinguishedName}.
        """
        accessor = getattr(self.original, 'get_' + suffix)
        copied = DistinguishedName()
        copied._copyFrom(accessor())
        return copied

    def getSubject(self):
        """
        Retrieve the subject of this certificate.
        @rtype: L{DistinguishedName}
        @return: A copy of the subject of this certificate.
        """
        return self._copyName('subject')
def _handleattrhelper(Class, transport, methodName):
    """
    (private) Helper for L{Certificate.peerFromTransport} and
    L{Certificate.hostFromTransport} which checks for incompatible handle types
    and null certificates and raises the appropriate exception or returns the
    appropriate certificate object.
    """
    handle = transport.getHandle()
    accessor = getattr(handle, "get_%s_certificate" % (methodName,), None)
    if accessor is None:
        # The handle is not a TLS connection at all.
        raise CertificateError(
            "non-TLS transport %r did not have %s certificate" % (transport, methodName))
    certificate = accessor()
    if certificate is None:
        # TLS connection, but no certificate was presented for this end.
        raise CertificateError(
            "TLS transport %r did not have %s certificate" % (transport, methodName))
    return Class(certificate)
class Certificate(CertBase):
    """
    An x509 certificate.
    """
    def __repr__(self):
        # Summarize by the common names of subject and issuer.
        return '<%s Subject=%s Issuer=%s>' % (self.__class__.__name__,
                                              self.getSubject().commonName,
                                              self.getIssuer().commonName)
    def __eq__(self, other):
        # Certificates are equal when their serialized forms are identical.
        if isinstance(other, Certificate):
            return self.dump() == other.dump()
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def load(Class, requestData, format=crypto.FILETYPE_ASN1, args=()):
        """
        Load a certificate from an ASN.1- or PEM-format string.
        @rtype: C{Class}
        """
        return Class(crypto.load_certificate(format, requestData), *args)
    # Pre-decorator (Python 2.3 era) spelling of @classmethod; _load keeps
    # a reference to this implementation so subclasses that override load()
    # (e.g. PrivateCertificate) can still reach it.
    load = classmethod(load)
    _load = load
    def dumpPEM(self):
        """
        Dump this certificate to a PEM-format data string.
        @rtype: C{str}
        """
        return self.dump(crypto.FILETYPE_PEM)
    def loadPEM(Class, data):
        """
        Load a certificate from a PEM-format data string.
        @rtype: C{Class}
        """
        return Class.load(data, crypto.FILETYPE_PEM)
    loadPEM = classmethod(loadPEM)
    def peerFromTransport(Class, transport):
        """
        Get the certificate for the remote end of the given transport.
        @type: L{ISystemHandle}
        @rtype: C{Class}
        @raise: L{CertificateError}, if the given transport does not have a peer
        certificate.
        """
        return _handleattrhelper(Class, transport, 'peer')
    peerFromTransport = classmethod(peerFromTransport)
    def hostFromTransport(Class, transport):
        """
        Get the certificate for the local end of the given transport.
        @param transport: an L{ISystemHandle} provider; the transport we will
        @rtype: C{Class}
        @raise: L{CertificateError}, if the given transport does not have a host
        certificate.
        """
        return _handleattrhelper(Class, transport, 'host')
    hostFromTransport = classmethod(hostFromTransport)
    def getPublicKey(self):
        """
        Get the public key for this certificate.
        @rtype: L{PublicKey}
        """
        return PublicKey(self.original.get_pubkey())
    def dump(self, format=crypto.FILETYPE_ASN1):
        # Serialize the certificate in the requested format (ASN.1/DER by
        # default, or PEM).
        return crypto.dump_certificate(format, self.original)
    def serialNumber(self):
        """
        Retrieve the serial number of this certificate.
        @rtype: C{int}
        """
        return self.original.get_serial_number()
    def digest(self, method='md5'):
        """
        Return a digest hash of this certificate using the specified hash
        algorithm.
        @param method: One of C{'md5'} or C{'sha'}.
        @rtype: C{str}
        """
        return self.original.digest(method)
    def _inspect(self):
        # Certificate-only part of inspect(); PrivateCertificate reuses it.
        return '\n'.join(['Certificate For Subject:',
                          self.getSubject().inspect(),
                          '\nIssuer:',
                          self.getIssuer().inspect(),
                          '\nSerial Number: %d' % self.serialNumber(),
                          'Digest: %s' % self.digest()])
    def inspect(self):
        """
        Return a multi-line, human-readable representation of this
        Certificate, including information about the subject, issuer, and
        public key.
        """
        return '\n'.join((self._inspect(), self.getPublicKey().inspect()))
    def getIssuer(self):
        """
        Retrieve the issuer of this certificate.
        @rtype: L{DistinguishedName}
        @return: A copy of the issuer of this certificate.
        """
        return self._copyName('issuer')
    def options(self, *authorities):
        # Only meaningful for certificates with a private key; see
        # PrivateCertificate.options.
        raise NotImplementedError('Possible, but doubtful we need this yet')
class CertificateRequest(CertBase):
    """
    An x509 certificate request.
    Certificate requests are given to certificate authorities to be signed and
    returned resulting in an actual certificate.
    """
    def load(Class, requestData, requestFormat=crypto.FILETYPE_ASN1):
        # Parse the request and verify its self-signature before wrapping;
        # an unverifiable request is rejected outright.
        req = crypto.load_certificate_request(requestFormat, requestData)
        dn = DistinguishedName()
        dn._copyFrom(req.get_subject())
        if not req.verify(req.get_pubkey()):
            raise VerifyError("Can't verify that request for %r is self-signed." % (dn,))
        return Class(req)
    load = classmethod(load)
    def dump(self, format=crypto.FILETYPE_ASN1):
        # Serialize the request in the given format (ASN.1/DER or PEM).
        return crypto.dump_certificate_request(format, self.original)
class PrivateCertificate(Certificate):
    """
    An x509 certificate and private key.
    """
    def __repr__(self):
        return Certificate.__repr__(self) + ' with ' + repr(self.privateKey)
    def _setPrivateKey(self, privateKey):
        # Attach the private key, first checking that it matches this
        # certificate's public key.
        if not privateKey.matches(self.getPublicKey()):
            raise VerifyError(
                "Certificate public and private keys do not match.")
        self.privateKey = privateKey
        return self
    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        """
        Create a new L{PrivateCertificate} from the given certificate data and
        this instance's private key.
        """
        return self.load(newCertData, self.privateKey, format)
    def load(Class, data, privateKey, format=crypto.FILETYPE_ASN1):
        # Note the extra privateKey argument compared to Certificate.load;
        # _load is the inherited certificate-only loader.
        return Class._load(data, format)._setPrivateKey(privateKey)
    load = classmethod(load)
    def inspect(self):
        # Certificate details followed by the private key's description.
        return '\n'.join([Certificate._inspect(self),
                          self.privateKey.inspect()])
    def dumpPEM(self):
        """
        Dump both public and private parts of a private certificate to
        PEM-format data.
        """
        return self.dump(crypto.FILETYPE_PEM) + self.privateKey.dump(crypto.FILETYPE_PEM)
    def loadPEM(Class, data):
        """
        Load both private and public parts of a private certificate from a
        chunk of PEM-format data.
        """
        return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
                          crypto.FILETYPE_PEM)
    loadPEM = classmethod(loadPEM)
    def fromCertificateAndKeyPair(Class, certificateInstance, privateKey):
        # Alternate constructor: pair an existing Certificate wrapper with
        # a KeyPair.
        privcert = Class(certificateInstance.original)
        return privcert._setPrivateKey(privateKey)
    fromCertificateAndKeyPair = classmethod(fromCertificateAndKeyPair)
    def options(self, *authorities):
        # Build context-factory options from this certificate/key; if CA
        # certificates are given, peer verification becomes mandatory.
        options = dict(privateKey=self.privateKey.original,
                       certificate=self.original)
        if authorities:
            options.update(dict(verify=True,
                                requireCertificate=True,
                                caCerts=[auth.original for auth in authorities]))
        return OpenSSLCertificateOptions(**options)
    def certificateRequest(self, format=crypto.FILETYPE_ASN1,
                           digestAlgorithm='md5'):
        # Produce a CSR for this certificate's subject, signed with our key.
        return self.privateKey.certificateRequest(
            self.getSubject(),
            format,
            digestAlgorithm)
    def signCertificateRequest(self,
                               requestData,
                               verifyDNCallback,
                               serialNumber,
                               requestFormat=crypto.FILETYPE_ASN1,
                               certificateFormat=crypto.FILETYPE_ASN1):
        # Act as a CA: sign someone else's request using our subject as the
        # issuer; delegation to the private key does the real work.
        issuer = self.getSubject()
        return self.privateKey.signCertificateRequest(
            issuer,
            requestData,
            verifyDNCallback,
            serialNumber,
            requestFormat,
            certificateFormat)
    def signRequestObject(self, certificateRequest, serialNumber,
                          secondsToExpiry=60 * 60 * 24 * 365, # One year
                          digestAlgorithm='md5'):
        # Sign an already-parsed CertificateRequest with our key/subject.
        return self.privateKey.signRequestObject(self.getSubject(),
                                                 certificateRequest,
                                                 serialNumber,
                                                 secondsToExpiry,
                                                 digestAlgorithm)
class PublicKey:
    """
    A wrapper around a pyOpenSSL public key object.
    """
    def __init__(self, osslpkey):
        self.original = osslpkey
        # An empty certificate request built around this key is kept as a
        # canonical, comparable serialization; matches() and keyHash()
        # both rely on it.
        req1 = crypto.X509Req()
        req1.set_pubkey(osslpkey)
        self._emptyReq = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req1)
    def matches(self, otherKey):
        # Keys match when their canonical empty-request serializations are
        # byte-identical.
        return self._emptyReq == otherKey._emptyReq
    # XXX This could be a useful method, but sometimes it triggers a segfault,
    # so we'll steer clear for now.
    # def verifyCertificate(self, certificate):
    #     """
    #     returns None, or raises a VerifyError exception if the certificate
    #     could not be verified.
    #     """
    #     if not certificate.original.verify(self.original):
    #         raise VerifyError("We didn't sign that certificate.")
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.keyHash())
    def keyHash(self):
        """
        MD5 hex digest of signature on an empty certificate request with this
        key.
        """
        return md5(self._emptyReq).hexdigest()
    def inspect(self):
        # One-line human-readable description; see keyHash() for the hash.
        return 'Public Key with Hash: %s' % (self.keyHash(),)
class KeyPair(PublicKey):
    """
    A public/private key pair: extends L{PublicKey} with serialization,
    generation, and signing operations that need the private half.
    """
    def load(Class, data, format=crypto.FILETYPE_ASN1):
        # Parse a private key from ASN.1/DER (default) or PEM data.
        return Class(crypto.load_privatekey(format, data))
    load = classmethod(load)
    def dump(self, format=crypto.FILETYPE_ASN1):
        return crypto.dump_privatekey(format, self.original)
    def __getstate__(self):
        # Pickle as the ASN.1 serialization of the private key.
        return self.dump()
    def __setstate__(self, state):
        self.__init__(crypto.load_privatekey(crypto.FILETYPE_ASN1, state))
    def inspect(self):
        # Describe the key type, size and hash, e.g.
        # "1024-bit RSA Key Pair with Hash: ...".
        t = self.original.type()
        if t == crypto.TYPE_RSA:
            ts = 'RSA'
        elif t == crypto.TYPE_DSA:
            ts = 'DSA'
        else:
            ts = '(Unknown Type!)'
        L = (self.original.bits(), ts, self.keyHash())
        return '%s-bit %s Key Pair with Hash: %s' % L
    def generate(Class, kind=crypto.TYPE_RSA, size=1024):
        # Generate a brand-new key pair (RSA by default).
        pkey = crypto.PKey()
        pkey.generate_key(kind, size)
        return Class(pkey)
    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        # Pair this key with freshly received certificate data.
        return PrivateCertificate.load(newCertData, self, format)
    generate = classmethod(generate)
    def requestObject(self, distinguishedName, digestAlgorithm='md5'):
        # Build and self-sign a certificate request for the given subject.
        req = crypto.X509Req()
        req.set_pubkey(self.original)
        distinguishedName._copyInto(req.get_subject())
        req.sign(self.original, digestAlgorithm)
        return CertificateRequest(req)
    def certificateRequest(self, distinguishedName,
                           format=crypto.FILETYPE_ASN1,
                           digestAlgorithm='md5'):
        """Create a certificate request signed with this key.
        @return: a string, formatted according to the 'format' argument.
        """
        return self.requestObject(distinguishedName, digestAlgorithm).dump(format)
    def signCertificateRequest(self,
                               issuerDistinguishedName,
                               requestData,
                               verifyDNCallback,
                               serialNumber,
                               requestFormat=crypto.FILETYPE_ASN1,
                               certificateFormat=crypto.FILETYPE_ASN1,
                               secondsToExpiry=60 * 60 * 24 * 365, # One year
                               digestAlgorithm='md5'):
        """
        Given a blob of certificate request data and a certificate authority's
        DistinguishedName, return a blob of signed certificate data.
        If verifyDNCallback returns a Deferred, I will return a Deferred which
        fires the data when that Deferred has completed.
        """
        hlreq = CertificateRequest.load(requestData, requestFormat)
        dn = hlreq.getSubject()
        vval = verifyDNCallback(dn)
        def verified(value):
            # The callback decides whether this DN may be signed; a falsy
            # result rejects the request.
            if not value:
                raise VerifyError("DN callback %r rejected request DN %r" % (verifyDNCallback, dn))
            return self.signRequestObject(issuerDistinguishedName, hlreq,
                                          serialNumber, secondsToExpiry, digestAlgorithm).dump(certificateFormat)
        if isinstance(vval, Deferred):
            return vval.addCallback(verified)
        else:
            return verified(vval)
    def signRequestObject(self,
                          issuerDistinguishedName,
                          requestObject,
                          serialNumber,
                          secondsToExpiry=60 * 60 * 24 * 365, # One year
                          digestAlgorithm='md5'):
        """
        Sign a CertificateRequest instance, returning a Certificate instance.
        """
        req = requestObject.original
        dn = requestObject.getSubject()
        cert = crypto.X509()
        issuerDistinguishedName._copyInto(cert.get_issuer())
        cert.set_subject(req.get_subject())
        cert.set_pubkey(req.get_pubkey())
        # Validity window: from "now" until secondsToExpiry from now.
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(secondsToExpiry)
        cert.set_serial_number(serialNumber)
        cert.sign(self.original, digestAlgorithm)
        return Certificate(cert)
    def selfSignedCert(self, serialNumber, **kw):
        # Convenience: issue a certificate for ourselves, with subject ==
        # issuer built from the keyword arguments (CN=..., O=..., etc.).
        dn = DN(**kw)
        return PrivateCertificate.fromCertificateAndKeyPair(
            self.signRequestObject(dn, self.requestObject(dn), serialNumber),
            self)
class OpenSSLCertificateOptions(object):
    """
    A factory for SSL context objects for both SSL servers and clients.
    """
    # Lazily-created SSL.Context, cached by getContext().
    _context = None
    # Older versions of PyOpenSSL didn't provide OP_ALL. Fudge it here, just in case.
    _OP_ALL = getattr(SSL, 'OP_ALL', 0x0000FFFF)
    # OP_NO_TICKET is not (yet) exposed by PyOpenSSL
    _OP_NO_TICKET = 0x00004000
    # Default protocol; may be overridden per-instance via __init__.
    method = SSL.TLSv1_METHOD
    def __init__(self,
                 privateKey=None,
                 certificate=None,
                 method=None,
                 verify=False,
                 caCerts=None,
                 verifyDepth=9,
                 requireCertificate=True,
                 verifyOnce=True,
                 enableSingleUseKeys=True,
                 enableSessions=True,
                 fixBrokenPeers=False,
                 enableSessionTickets=False):
        """
        Create an OpenSSL context SSL connection context factory.
        @param privateKey: A PKey object holding the private key.
        @param certificate: An X509 object holding the certificate.
        @param method: The SSL protocol to use, one of SSLv23_METHOD,
        SSLv2_METHOD, SSLv3_METHOD, TLSv1_METHOD. Defaults to TLSv1_METHOD.
        @param verify: If True, verify certificates received from the peer and
        fail the handshake if verification fails. Otherwise, allow anonymous
        sessions and sessions with certificates which fail validation. By
        default this is False.
        @param caCerts: List of certificate authority certificate objects to
        use to verify the peer's certificate. Only used if verify is
        C{True}, and if verify is C{True}, this must be specified. Since
        verify is C{False} by default, this is C{None} by default.
        @type caCerts: C{list} of L{OpenSSL.crypto.X509}
        @param verifyDepth: Depth in certificate chain down to which to verify.
        If unspecified, use the underlying default (9).
        @param requireCertificate: If True, do not allow anonymous sessions.
        @param verifyOnce: If True, do not re-verify the certificate
        on session resumption.
        @param enableSingleUseKeys: If True, generate a new key whenever
        ephemeral DH parameters are used to prevent small subgroup attacks.
        @param enableSessions: If True, set a session ID on each context. This
        allows a shortened handshake to be used when a known client reconnects.
        @param fixBrokenPeers: If True, enable various non-spec protocol fixes
        for broken SSL implementations. This should be entirely safe,
        according to the OpenSSL documentation, but YMMV. This option is now
        off by default, because it causes problems with connections between
        peers using OpenSSL 0.9.8a.
        @param enableSessionTickets: If True, enable session ticket extension
        for session resumption per RFC 5077. Note there is no support for
        controlling session tickets. This option is off by default, as some
        server implementations don't correctly process incoming empty session
        ticket extensions in the hello.
        """
        assert (privateKey is None) == (certificate is None), "Specify neither or both of privateKey and certificate"
        self.privateKey = privateKey
        self.certificate = certificate
        if method is not None:
            self.method = method
        self.verify = verify
        assert ((verify and caCerts) or
                (not verify)), "Specify client CA certificate information if and only if enabling certificate verification"
        self.caCerts = caCerts
        self.verifyDepth = verifyDepth
        self.requireCertificate = requireCertificate
        self.verifyOnce = verifyOnce
        self.enableSingleUseKeys = enableSingleUseKeys
        self.enableSessions = enableSessions
        self.fixBrokenPeers = fixBrokenPeers
        self.enableSessionTickets = enableSessionTickets
    def __getstate__(self):
        # Drop the cached SSL.Context when pickling: it is not picklable
        # and is rebuilt on demand by getContext().
        d = self.__dict__.copy()
        try:
            del d['_context']
        except KeyError:
            pass
        return d
    def __setstate__(self, state):
        self.__dict__ = state
    def getContext(self):
        """Return a SSL.Context object.
        """
        if self._context is None:
            self._context = self._makeContext()
        return self._context
    def _makeContext(self):
        # Build and configure an SSL.Context from the options captured in
        # __init__.
        ctx = SSL.Context(self.method)
        if self.certificate is not None and self.privateKey is not None:
            ctx.use_certificate(self.certificate)
            ctx.use_privatekey(self.privateKey)
            # Sanity check
            ctx.check_privatekey()
        verifyFlags = SSL.VERIFY_NONE
        if self.verify:
            verifyFlags = SSL.VERIFY_PEER
            if self.requireCertificate:
                verifyFlags |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
            if self.verifyOnce:
                verifyFlags |= SSL.VERIFY_CLIENT_ONCE
            if self.caCerts:
                store = ctx.get_cert_store()
                for cert in self.caCerts:
                    store.add_cert(cert)
        # It'd be nice if pyOpenSSL let us pass None here for this behavior (as
        # the underlying OpenSSL API call allows NULL to be passed). It
        # doesn't, so we'll supply a function which does the same thing.
        def _verifyCallback(conn, cert, errno, depth, preverify_ok):
            return preverify_ok
        ctx.set_verify(verifyFlags, _verifyCallback)
        if self.verifyDepth is not None:
            ctx.set_verify_depth(self.verifyDepth)
        if self.enableSingleUseKeys:
            ctx.set_options(SSL.OP_SINGLE_DH_USE)
        if self.fixBrokenPeers:
            ctx.set_options(self._OP_ALL)
        if self.enableSessions:
            # Give each context a unique session id so resumed sessions do
            # not collide between factory instances.
            sessionName = md5("%s-%d" % (reflect.qual(self.__class__), _sessionCounter())).hexdigest()
            ctx.set_session_id(sessionName)
        if not self.enableSessionTickets:
            ctx.set_options(self._OP_NO_TICKET)
        return ctx
| gpl-3.0 |
bitjammer/swift | utils/swift_build_support/swift_build_support/products/ninja.py | 31 | 2042 | # swift_build_support/products/ninja.py -------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Ninja build
"""
# ----------------------------------------------------------------------------
import os.path
import platform
import sys
from . import product
from .. import cache_util
from .. import shell
class Ninja(product.Product):
    """Product definition that bootstraps the Ninja build tool in-tree."""

    @cache_util.reify
    def ninja_bin_path(self):
        """Path of the 'ninja' executable inside the build directory."""
        return os.path.join(self.build_dir, 'ninja')

    def _configure_env(self):
        """Return environment overrides for configure.py, or None."""
        if platform.system() == "Darwin":
            from .. import xcrun
            sysroot = xcrun.sdk_path("macosx")
            osx_version_min = self.args.darwin_deployment_version_osx
            assert sysroot is not None
            cflags = (
                "-isysroot {sysroot} -mmacosx-version-min={osx_version}"
            ).format(sysroot=sysroot, osx_version=osx_version_min)
            ldflags = (
                "-mmacosx-version-min={osx_version}"
            ).format(osx_version=osx_version_min)
            return {
                "CXX": self.toolchain.cxx,
                "CFLAGS": cflags,
                "LDFLAGS": ldflags,
            }
        if self.toolchain.cxx:
            return {
                "CXX": self.toolchain.cxx,
            }
        return None

    def do_build(self):
        """Copy the Ninja sources into the build directory and bootstrap."""
        if os.path.exists(self.ninja_bin_path):
            return
        env = self._configure_env()
        # Ninja can only be built in-tree. Copy the source tree to the build
        # directory.
        shell.rmtree(self.build_dir)
        shell.copytree(self.source_dir, self.build_dir)
        with shell.pushd(self.build_dir):
            shell.call([sys.executable, 'configure.py', '--bootstrap'],
                       env=env)
| apache-2.0 |
isabernardes/Heriga | Herigaenv/lib/python2.7/site-packages/django/http/cookie.py | 460 | 4390 | from __future__ import unicode_literals
import sys
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import http_cookies
# Feature probes: detect, at import time, which known stdlib cookie bugs
# this Python version exhibits, so the compat shims below are only
# installed when needed.
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
    _tc.load(str('foo:bar=1'))
    _cookie_allows_colon_in_names = True
except http_cookies.CookieError:
    _cookie_allows_colon_in_names = False
# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
cookie_pickles_properly = (
    (sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
    sys.version_info >= (3, 4, 3)
)
# If the stdlib SimpleCookie is free of all three bugs, use it directly;
# otherwise subclass it and patch only the broken behaviors.
if _cookie_encodes_correctly and _cookie_allows_colon_in_names and cookie_pickles_properly:
    SimpleCookie = http_cookies.SimpleCookie
else:
    Morsel = http_cookies.Morsel
    class SimpleCookie(http_cookies.SimpleCookie):
        if not cookie_pickles_properly:
            def __setitem__(self, key, value):
                # Apply the fix from http://bugs.python.org/issue22775 where
                # it's not fixed in Python itself
                if isinstance(value, Morsel):
                    # allow assignment of constructed Morsels (e.g. for pickling)
                    dict.__setitem__(self, key, value)
                else:
                    super(SimpleCookie, self).__setitem__(key, value)
        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','
                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars. We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.
                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)
                encoded = encoded.replace(";", "\\073").replace(",", "\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'
                return val, encoded
        if not _cookie_allows_colon_in_names:
            def load(self, rawdata):
                # Track names the parser rejects so they can be dropped
                # instead of aborting the whole parse.
                self.bad_cookies = set()
                if six.PY2 and isinstance(rawdata, six.text_type):
                    rawdata = force_str(rawdata)
                super(SimpleCookie, self).load(rawdata)
                for key in self.bad_cookies:
                    del self[key]
            # override private __set() method:
            # (needed for using our Morsel, and for laxness with CookieError
            def _BaseCookie__set(self, key, real_value, coded_value):
                key = force_str(key)
                try:
                    M = self.get(key, Morsel())
                    M.set(key, real_value, coded_value)
                    dict.__setitem__(self, key, M)
                except http_cookies.CookieError:
                    # Remember the invalid name; a placeholder Morsel keeps
                    # the dict consistent until load() removes it.
                    if not hasattr(self, 'bad_cookies'):
                        self.bad_cookies = set()
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
    """
    Parse a raw cookie header value (or an existing BaseCookie) into a
    plain dict of cookie name -> value; invalid input yields {}.
    """
    if cookie == '':
        return {}
    if isinstance(cookie, http_cookies.BaseCookie):
        parsed = cookie
    else:
        parsed = SimpleCookie()
        try:
            parsed.load(cookie)
        except http_cookies.CookieError:
            # Invalid cookie
            return {}
    return {key: parsed.get(key).value for key in parsed.keys()}
| mit |
soundcloud/selenium | py/selenium/webdriver/firefox/firefox_profile.py | 5 | 14665 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
# File name of the bundled WebDriver Firefox extension.
WEBDRIVER_EXT = "webdriver.xpi"
# Bundled JSON file holding the default profile preferences.
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
# Add-on id of the WebDriver extension.
EXTENSION_NAME = "fxdriver@googlecode.com"
class AddonFormatError(Exception):
    """Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
    def __init__(self, profile_directory=None):
        """
        Initialises a new instance of a Firefox Profile
        :args:
        - profile_directory: Directory of profile that you want to use.
        This defaults to None and will create a new
        directory when object is created.
        """
        if not FirefoxProfile.DEFAULT_PREFERENCES:
            # Load the bundled default preferences once per process and
            # cache them on the class.
            with open(os.path.join(os.path.dirname(__file__),
                                   WEBDRIVER_PREFERENCES)) as default_prefs:
                FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
        self.default_preferences = copy.deepcopy(
            FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
        self.native_events_enabled = True
        self.profile_dir = profile_directory
        self.tempfolder = None
        if self.profile_dir is None:
            self.profile_dir = self._create_tempfolder()
        else:
            # Work on a throwaway copy so the user's original profile
            # directory is never modified; lock files are excluded.
            self.tempfolder = tempfile.mkdtemp()
            newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
            shutil.copytree(self.profile_dir, newprof,
                            ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
            self.profile_dir = newprof
            self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
        self.extensionsDir = os.path.join(self.profile_dir, "extensions")
        self.userPrefs = os.path.join(self.profile_dir, "user.js")
#Public Methods
    def set_preference(self, key, value):
        """
        sets the preference that we want in the profile.
        """
        self.default_preferences[key] = value
    def add_extension(self, extension=WEBDRIVER_EXT):
        # Install an extension into the profile; defaults to the bundled
        # WebDriver extension (webdriver.xpi).
        self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
#Properties
    @property
    def path(self):
        """
        Gets the profile directory that is currently being used
        """
        return self.profile_dir
    @property
    def port(self):
        """
        Gets the port that WebDriver is working on
        """
        # _port is assigned by the port setter below.
        return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
try:
port = int(port)
if port < 1 or port > 65535:
raise WebDriverException("Port number must be in the range 1..65535")
except (ValueError, TypeError) as e:
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
if setting is None or setting is '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except:
warnings.warn("(skipping) failed to json.loads existing preference: " +
matches.group(1) + matches.group(2))
except:
# The profile given hasn't had any changes made, i.e no users.js
pass
    def _install_extension(self, addon, unpack=True):
        """
        Installs addon from a filepath, url
        or directory of addons in the profile.
        - path: url, path to .xpi, or directory of addons
        - unpack: whether to unpack unless specified otherwise in the install.rdf
        """
        if addon == WEBDRIVER_EXT:
            # Resolve the bundled WebDriver extension relative to this module.
            addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
        tmpdir = None
        xpifile = None
        if addon.endswith('.xpi'):
            # Unzip the XPI into a temp dir so its install.rdf can be read.
            tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
            compressed_file = zipfile.ZipFile(addon, 'r')
            for name in compressed_file.namelist():
                if name.endswith('/') and not os.path.isdir(os.path.join(tmpdir, name)):
                    # Directory entry: just create the directory tree.
                    os.makedirs(os.path.join(tmpdir, name))
                else:
                    if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
                        os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
                    data = compressed_file.read(name)
                    with open(os.path.join(tmpdir, name), 'wb') as f:
                        f.write(data)
            xpifile = addon
            addon = tmpdir
        # determine the addon id
        addon_details = self._addon_details(addon)
        addon_id = addon_details.get('id')
        assert addon_id, 'The addon id could not be found: %s' % addon
        # copy the addon to the profile
        extensions_path = os.path.join(self.profile_dir, 'extensions')
        addon_path = os.path.join(extensions_path, addon_id)
        if not unpack and not addon_details['unpack'] and xpifile:
            # Keep the packed form: <extensions>/<id>.xpi
            if not os.path.exists(extensions_path):
                os.makedirs(extensions_path)
            shutil.copy(xpifile, addon_path + '.xpi')
        else:
            # Install unpacked: copy the whole tree to <extensions>/<id>/
            if not os.path.exists(addon_path):
                shutil.copytree(addon, addon_path, symlinks=True)
        # remove the temporary directory, if any
        if tmpdir:
            shutil.rmtree(tmpdir)
    def _addon_details(self, addon_path):
        """
        Returns a dictionary of details about the addon.
        :param addon_path: path to the add-on directory or XPI
        :raises AddonFormatError: if install.rdf is missing or unparseable.
        Returns::
            {'id': u'rainbow@colors.org', # id of the addon
             'version': u'1.4', # version of the addon
             'name': u'Rainbow', # name of the addon
             'unpack': False } # whether to unpack the addon
        """
        details = {
            'id': None,
            'unpack': False,
            'name': None,
            'version': None
        }
        def get_namespace_id(doc, url):
            # Return the "prefix:" bound to namespace ``url`` on the root
            # element, or "" when it is the default namespace.
            attributes = doc.documentElement.attributes
            namespace = ""
            for i in range(attributes.length):
                if attributes.item(i).value == url:
                    if ":" in attributes.item(i).name:
                        # If the namespace is not the default one remove 'xlmns:'
                        namespace = attributes.item(i).name.split(':')[1] + ":"
                    break
            return namespace
        def get_text(element):
            """Retrieve the text value of a given node"""
            rc = []
            for node in element.childNodes:
                if node.nodeType == node.TEXT_NODE:
                    rc.append(node.data)
            return ''.join(rc).strip()
        if not os.path.exists(addon_path):
            raise IOError('Add-on path does not exist: %s' % addon_path)
        try:
            if zipfile.is_zipfile(addon_path):
                # Bug 944361 - We cannot use 'with' together with zipFile because
                # it will cause an exception thrown in Python 2.6.
                try:
                    compressed_file = zipfile.ZipFile(addon_path, 'r')
                    manifest = compressed_file.read('install.rdf')
                finally:
                    compressed_file.close()
            elif os.path.isdir(addon_path):
                with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
                    manifest = f.read()
            else:
                raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
        except (IOError, KeyError) as e:
            raise AddonFormatError(str(e), sys.exc_info()[2])
        try:
            doc = minidom.parseString(manifest)
            # Get the namespaces abbreviations
            em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
            rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
            description = doc.getElementsByTagName(rdf + 'Description').item(0)
            if description is None:
                # Fall back to an unprefixed tag name.
                description = doc.getElementsByTagName('Description').item(0)
            for node in description.childNodes:
                # Remove the namespace prefix from the tag for comparison
                entry = node.nodeName.replace(em, "")
                if entry in details.keys():
                    details.update({entry: get_text(node)})
            if details.get('id') is None:
                # Some manifests carry the id as an attribute rather than a child.
                for i in range(description.attributes.length):
                    attribute = description.attributes.item(i)
                    if attribute.name == em + 'id':
                        details.update({'id': attribute.value})
        except Exception as e:
            raise AddonFormatError(str(e), sys.exc_info()[2])
        # turn unpack into a true/false value
        if isinstance(details['unpack'], str):
            details['unpack'] = details['unpack'].lower() == 'true'
        # If no ID is set, the add-on is invalid
        if details.get('id') is None:
            raise AddonFormatError('Add-on id could not be found.')
        return details
| apache-2.0 |
Gustry/QGIS | tests/src/python/test_qgsserver_wms_getmap_size_server.py | 18 | 2093 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer MaxHeight and MaxWidth Override Options.
From build dir, run: ctest -R PyQgsServerWMSGetMapSizeServer -V
.. note:: This test needs env vars to be set before the server is
configured for the first time, for this
reason it cannot run as a test case of another server
test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Marco Bernasocchi'
__date__ = '01/04/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import os
# Needed on Qt 5 so that the serialization of XML is consistent among all
# executions
os.environ['QT_HASH_SEED'] = '1'
from qgis.testing import unittest
from test_qgsserver import QgsServerTestBase
from test_qgsserver_wms_getmap_size_project import make_request
class TestQgsServerWMSGetMapSizeServer(QgsServerTestBase):
    """QGIS Server WMS GetMap tests for the server-level max size override."""

    # Set to True to re-generate reference files for this class
    regenerate_reference = False

    def setUp(self):
        # The env overrides must be set before the base class configures the
        # server for the first time.
        os.environ['QGIS_SERVER_WMS_MAX_WIDTH'] = '3000'
        os.environ['QGIS_SERVER_WMS_MAX_HEIGHT'] = '3000'
        super().setUp()
        self.project = os.path.join(self.testdata_path, "test_project_with_size.qgs")
        self.expected_too_big = self.strip_version_xmlns(b'<ServiceExceptionReport version="1.3.0" xmlns="http://www.opengis.net/ogc">\n <ServiceException code="InvalidParameterValue">The requested map size is too large</ServiceException>\n</ServiceExceptionReport>\n')

    def test_wms_getmap_invalid_size_server(self):
        # test the 3000 limit from server is overriding the less conservative 5000 in the project
        r = make_request(self, 3001, 3000)
        self.assertEqual(self.strip_version_xmlns(r), self.expected_too_big)
# Allow running this test module directly (outside the ctest harness).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
edisonlz/fruit | web_project/base/site-packages/pygments/styles/native.py | 75 | 1938 | # -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
    """
    Pygments version of the "native" vim theme.
    """
    # Dark background with a slightly lighter colour for the highlight line.
    background_color = '#202020'
    highlight_color = '#404040'
    # Token -> style-string mapping consumed by the pygments formatter.
    styles = {
        Token: '#d0d0d0',
        Whitespace: '#666666',
        Comment: 'italic #999999',
        Comment.Preproc: 'noitalic bold #cd2828',
        Comment.Special: 'noitalic bold #e50808 bg:#520000',
        Keyword: 'bold #6ab825',
        Keyword.Pseudo: 'nobold',
        Operator.Word: 'bold #6ab825',
        String: '#ed9d13',
        String.Other: '#ffa500',
        Number: '#3677a9',
        Name.Builtin: '#24909d',
        Name.Variable: '#40ffff',
        Name.Constant: '#40ffff',
        Name.Class: 'underline #447fcf',
        Name.Function: '#447fcf',
        Name.Namespace: 'underline #447fcf',
        Name.Exception: '#bbbbbb',
        Name.Tag: 'bold #6ab825',
        Name.Attribute: '#bbbbbb',
        Name.Decorator: '#ffa500',
        Generic.Heading: 'bold #ffffff',
        Generic.Subheading: 'underline #ffffff',
        Generic.Deleted: '#d22323',
        Generic.Inserted: '#589819',
        Generic.Error: '#d22323',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.Prompt: '#aaaaaa',
        Generic.Output: '#cccccc',
        Generic.Traceback: '#d22323',
        Error: 'bg:#e3d2d2 #a61717'
    }
jmankoff/data | Assignments/jmankoff-explore/lib/flask/exthook.py | 783 | 5087 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """
    def __init__(self, module_choices, wrapper_module):
        # module_choices: '%s' templates tried in order (e.g. 'flask_%s',
        # 'flaskext.%s'); wrapper_module: dotted name doing the redirecting
        # (e.g. 'flask.ext').
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1
    def __eq__(self, other):
        # Compare by class identity (module + name) and configuration so a
        # reloaded copy of this class still compares equal in sys.meta_path.
        return self.__class__.__module__ == other.__class__.__module__ and \
            self.__class__.__name__ == other.__class__.__name__ and \
            self.wrapper_module == other.wrapper_module and \
            self.module_choices == other.module_choices
    def __ne__(self, other):
        return not self.__eq__(other)
    def install(self):
        # Register on sys.meta_path, dropping any equal, stale instance first.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: claim any import underneath our prefix.
        if fullname.startswith(self.prefix):
            return self
    def load_module(self, fullname):
        # PEP 302 loader hook: try each module_choices template in turn and
        # alias the first one that imports under the requested name.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very this seems to be redundant, but if recursive imports
                # happen we will call into the move import a second time.
                # On the second invocation we still don't have an entry for
                # fullname in sys.modules, but we will end up with the same
                # fake module name and that import will succeed since this
                # one already has a temporary entry in the modules dict.
                # Since this one "succeeded" temporarily that second
                # invocation now will have created a fullname entry in
                # sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice. The skipped frame
                # is the one from __import__ above which we don't care about
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                # Also expose the module as an attribute of the wrapper package.
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)
    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module. If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported. (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False
    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 Behavior. Modules are cleaned up late so the
        # name shows up properly here. Success!
        if module_name == important_module:
            return True
        # Some python versions will will clean up modules so early that the
        # module name at that point is no longer set. Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
            test_string + os.path.sep + '__init__.py' in filename
terranodo/eventkit-cloud | eventkit_cloud/tasks/migrations/0006_auto_20200803_1037.py | 1 | 3279 | # Generated by Django 2.2.5 on 2020-08-03 10:37
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
# Auto-generated Django migration (makemigrations); edit with care, since the
# operations below must stay consistent with the recorded migration history.
class Migration(migrations.Migration):
    dependencies = [
        ('jobs', '0002_dataprovider_attribute_class'),
        ('tasks', '0005_dataprovidertaskrecord_preview'),
    ]
    operations = [
        # Link task records to their originating data provider (nullable for
        # backwards compatibility with existing rows).
        migrations.AddField(
            model_name='dataprovidertaskrecord',
            name='provider',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_record_provider', to='jobs.DataProvider'),
        ),
        # Index the deleted flag to speed up the common "not deleted" filter.
        migrations.AlterField(
            model_name='exportrun',
            name='deleted',
            field=models.BooleanField(db_index=True, default=False),
        ),
        migrations.CreateModel(
            name='RunZipFile',
            fields=[
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('updated_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('started_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('finished_at', models.DateTimeField(editable=False, null=True)),
                ('id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('uid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
                ('data_provider_task_records', models.ManyToManyField(to='tasks.DataProviderTaskRecord')),
                ('downloadable_file', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tasks.FileProducingTaskResult')),
                ('run', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='zip_files', to='tasks.ExportRun')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExportRunFile',
            fields=[
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('updated_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('uid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
                ('file', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/export_run_files/', location='/var/lib/eventkit/exports_stage/export_run_files'), upload_to='', verbose_name='File')),
                ('directory', models.CharField(blank=True, help_text='An optional directory name to store the file in.', max_length=100, null=True)),
                ('provider', models.ForeignKey(blank=True, help_text='An optional data provider to associate the file with.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='file_provider', to='jobs.DataProvider')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| bsd-3-clause |
ctiao/platform-external-skia | tools/find_bad_images_in_skps.py | 172 | 7405 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script will take as an argument either a list of skp files or a
set of directories that contains skp files. It will then test each
skp file with the `render_pictures` program. If that program either
spits out any unexpected output or doesn't return 0, I will flag that
skp file as problematic. We then extract all of the embedded images
inside the skp and test each one of them against the
SkImageDecoder::DecodeFile function. Again, we consider any
extraneous output or a bad return value an error. In the event of an
error, we retain the image and print out information about the error.
The output (on stdout) is formatted as a csv document.
A copy of each bad image is left in a directory created by
tempfile.mkdtemp().
"""
import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import test_rendering # skia/trunk/tools. reuse FindPathToProgram()
# Command-line help text; {command} is filled in with this script's path.
USAGE = """
Usage:
{command} SKP_FILE [SKP_FILES]
{command} SKP_DIR [SKP_DIRS]\n
Environment variables:
To run multiple worker threads, set NUM_THREADS.
To use a different temporary storage location, set TMPDIR.
"""
def execute_program(args, ignores=None):
    """
    Execute a process and waits for it to complete. Returns all
    output (stderr and stdout) after (optional) filtering.
    @param args is passed into subprocess.Popen().
    @param ignores (optional) is a list of regular expression strings
    that will be ignored in the output.
    @returns a tuple (returncode, output)
    """
    if ignores is None:
        ignores = []
    else:
        ignores = [re.compile(ignore) for ignore in ignores]
    # universal_newlines makes proc.stdout yield str on Python 3 as well;
    # without it, matching the str patterns above against bytes lines raises
    # TypeError (and ''.join over bytes fails).  Behavior on Python 2 is
    # unchanged.
    proc = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)
    output = ''.join(
        line for line in proc.stdout
        if not any(bool(ignore.match(line)) for ignore in ignores))
    returncode = proc.wait()
    return (returncode, output)
def list_files(paths):
    """
    Accepts a list of directories or filenames on the command line.
    We do not choose to recurse into directories beyond one level.
    """
    class NotAFileException(Exception):
        pass
    for pattern in paths:
        for expanded in glob.iglob(pattern):  # useful on win32
            if os.path.isfile(expanded):
                yield expanded
            elif os.path.isdir(expanded):
                for entry in os.listdir(expanded):
                    candidate = os.path.join(expanded, entry)
                    if os.path.isfile(candidate):
                        yield candidate
            else:
                raise NotAFileException('{} is not a file'.format(expanded))
class BadImageFinder(object):
    """Runs render_pictures over .skp files and keeps images that fail to decode."""

    def __init__(self, directory=None):
        # Locate the two helper binaries built by the Skia tree.
        self.render_pictures = test_rendering.FindPathToProgram(
            'render_pictures')
        self.test_image_decoder = test_rendering.FindPathToProgram(
            'test_image_decoder')
        assert os.path.isfile(self.render_pictures)
        assert os.path.isfile(self.test_image_decoder)
        if directory is None:
            # No destination given: keep bad images in a fresh temp dir.
            self.saved_image_dir = tempfile.mkdtemp(prefix='skia_skp_test_')
        else:
            assert os.path.isdir(directory)
            self.saved_image_dir = directory
        self.bad_image_count = 0
    def process_files(self, skp_files):
        for path in skp_files:
            self.process_file(path)
    def process_file(self, skp_file):
        # First pass: render the skp; clean runs (rc 0, no output) are skipped.
        assert self.saved_image_dir is not None
        assert os.path.isfile(skp_file)
        args = [self.render_pictures, '--readPath', skp_file]
        ignores = ['^process_in', '^deserializ', '^drawing...', '^Non-defaul']
        returncode, output = execute_program(args, ignores)
        if (returncode == 0) and not output:
            return
        # Second pass: extract the embedded images and test each decoder.
        temp_image_dir = tempfile.mkdtemp(prefix='skia_skp_test___')
        args = [ self.render_pictures, '--readPath', skp_file,
            '--writePath', temp_image_dir, '--writeEncodedImages']
        subprocess.call(args, stderr=open(os.devnull,'w'),
            stdout=open(os.devnull,'w'))
        for image_name in os.listdir(temp_image_dir):
            image_path = os.path.join(temp_image_dir, image_name)
            assert(os.path.isfile(image_path))
            args = [self.test_image_decoder, image_path]
            returncode, output = execute_program(args, [])
            if (returncode == 0) and not output:
                # Image decodes cleanly; discard it.
                os.remove(image_path)
                continue
            try:
                shutil.move(image_path, self.saved_image_dir)
            except (shutil.Error,):
                # If this happens, don't stop the entire process,
                # just warn the user.
                os.remove(image_path)
                sys.stderr.write('{0} is a repeat.\n'.format(image_name))
            self.bad_image_count += 1
            # Map known return codes to human-readable error labels.
            if returncode == 2:
                returncode = 'SkImageDecoder::DecodeFile returns false'
            elif returncode == 0:
                returncode = 'extra verbosity'
                assert output
            elif returncode == -11:
                returncode = 'segmentation violation'
            else:
                returncode = 'returncode: {}'.format(returncode)
            # Emit one CSV row per bad image on stdout.
            output = output.strip().replace('\n',' ').replace('"','\'')
            suffix = image_name[-3:]
            output_line = '"{0}","{1}","{2}","{3}","{4}"\n'.format(
                returncode, suffix, skp_file, image_name, output)
            sys.stdout.write(output_line)
            sys.stdout.flush()
        os.rmdir(temp_image_dir)
        return
def main(main_argv):
    """Fan the given skp files/dirs out over NUM_THREADS workers; returns exit code."""
    if not main_argv or main_argv[0] in ['-h', '-?', '-help', '--help']:
        sys.stderr.write(USAGE.format(command=__file__))
        return 1
    if 'NUM_THREADS' in os.environ:
        number_of_threads = int(os.environ['NUM_THREADS'])
        if number_of_threads < 1:
            number_of_threads = 1
    else:
        number_of_threads = 1
    # Silence decoder warnings in the child processes.
    os.environ['skia_images_png_suppressDecoderWarnings'] = 'true'
    os.environ['skia_images_jpeg_suppressDecoderWarnings'] = 'true'
    temp_dir = tempfile.mkdtemp(prefix='skia_skp_test_')
    sys.stderr.write('Directory for bad images: {}\n'.format(temp_dir))
    # CSV header matching BadImageFinder.process_file's rows.
    sys.stdout.write('"Error","Filetype","SKP File","Image File","Output"\n')
    sys.stdout.flush()
    # NOTE(review): xrange is Python 2 only; this script predates Python 3.
    finders = [
        BadImageFinder(temp_dir) for index in xrange(number_of_threads)]
    arguments = [[] for index in xrange(number_of_threads)]
    for index, item in enumerate(list_files(main_argv)):
        ## split up the given targets among the worker threads
        arguments[index % number_of_threads].append(item)
    threads = [
        threading.Thread(
            target=BadImageFinder.process_files, args=(finder,argument))
        for finder, argument in zip(finders, arguments)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    number = sum(finder.bad_image_count for finder in finders)
    sys.stderr.write('Number of bad images found: {}\n'.format(number))
    return 0
# Script entry point: exit status is main()'s return value.
if __name__ == '__main__':
    exit(main(sys.argv[1:]))
# LocalWords: skp stdout csv
| bsd-3-clause |
cdepman/falcon_api | site-packages/psycopg2cffi/_impl/connection.py | 1 | 28889 | from __future__ import unicode_literals
import threading
import weakref
from functools import wraps
import six
from psycopg2cffi._impl import consts
from psycopg2cffi._impl import encodings as _enc
from psycopg2cffi._impl import exceptions
from psycopg2cffi._impl.libpq import libpq, ffi
from psycopg2cffi._impl import util
from psycopg2cffi._impl.adapters import bytes_to_ascii, ascii_to_bytes
from psycopg2cffi._impl.cursor import Cursor
from psycopg2cffi._impl.lobject import LargeObject
from psycopg2cffi._impl.notify import Notify
from psycopg2cffi._impl.xid import Xid
# Map between isolation levels names and values and back.
_isolevels = {
    '': consts.ISOLATION_LEVEL_AUTOCOMMIT,
    'read uncommitted': consts.ISOLATION_LEVEL_READ_UNCOMMITTED,
    'read committed': consts.ISOLATION_LEVEL_READ_COMMITTED,
    'repeatable read': consts.ISOLATION_LEVEL_REPEATABLE_READ,
    'serializable': consts.ISOLATION_LEVEL_SERIALIZABLE,
    'default': -1,
}
# Add the reverse entries (constant -> name) so the dict maps both ways.
for k, v in list(_isolevels.items()):
    _isolevels[v] = k
del k, v

# Hook for coroutine/green-thread support; presumably installed by the
# extensions' wait-callback machinery — TODO confirm against the caller.
_green_callback = None
def check_closed(func):
    """Decorator: raise InterfaceError when the connection has been closed."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.closed:
            raise exceptions.InterfaceError('connection already closed')
        return func(self, *args, **kwargs)
    return wrapper
def check_notrans(func):
    """Decorator: only allow the call while no transaction is in progress."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.status != consts.STATUS_READY:
            raise exceptions.ProgrammingError('not valid in transaction')
        return func(self, *args, **kwargs)
    return wrapper
def check_tpc(func):
    """Decorator: forbid the call while a two-phase transaction is active."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self._tpc_xid:
            raise exceptions.ProgrammingError(
                '%s cannot be used during a two-phase transaction'
                % func.__name__)
        return func(self, *args, **kwargs)
    return wrapper
def check_tpc_supported(func):
    """Decorator: require a server new enough (>= 8.1) for two-phase commit."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.server_version < 80100:
            raise exceptions.NotSupportedError(
                "server version %s: two-phase transactions not supported"
                % self.server_version)
        return func(self, *args, **kwargs)
    return wrapper
def check_async(func):
    """Decorator: reject the call on asynchronous connections."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self._async:
            raise exceptions.ProgrammingError(
                '%s cannot be used in asynchronous mode' % func.__name__)
        return func(self, *args, **kwargs)
    return wrapper
class Connection(object):
    """A connection to a PostgreSQL database, implementing DB-API 2.0."""

    # Various exceptions which should be accessible via the Connection
    # class according to dbapi 2.0
    Error = exceptions.Error
    DatabaseError = exceptions.DatabaseError
    IntegrityError = exceptions.IntegrityError
    InterfaceError = exceptions.InterfaceError
    InternalError = exceptions.InternalError
    NotSupportedError = exceptions.NotSupportedError
    OperationalError = exceptions.OperationalError
    ProgrammingError = exceptions.ProgrammingError
    Warning = exceptions.Warning
    # NOTE(review): ``async`` became a reserved word in Python 3.7; this
    # signature only parses on older interpreters.
    def __init__(self, dsn, async=False):
        self.dsn = dsn
        self.status = consts.STATUS_SETUP
        self._encoding = None
        self._py_enc = None
        self._closed = 0
        self._cancel = ffi.NULL
        self._typecasts = {}
        self._tpc_xid = None
        self._notifies = []
        self._autocommit = False
        self._pgconn = None
        self._equote = False
        self._lock = threading.RLock()
        self.notices = []
        self.cursor_factory = None
        # The number of commits/rollbacks done so far
        self._mark = 0
        self._async = async
        self._async_status = consts.ASYNC_DONE
        self._async_cursor = None
        # Use a weakref in the C callback so the callback itself does not
        # keep the connection alive.
        self_ref = weakref.ref(self)
        self._notice_callback = ffi.callback(
            'void(void *, const char *)',
            lambda arg, message: self_ref()._process_notice(
                arg, bytes_to_ascii(ffi.string(message))))
        # Synchronous connections are established here; async ones are
        # completed later by polling.
        if not self._async:
            self._connect_sync()
        else:
            self._connect_async()
    def _connect_sync(self):
        """Open the connection synchronously, blocking until it is usable."""
        self._pgconn = libpq.PQconnectdb(self.dsn.encode('utf-8'))
        if not self._pgconn:
            # NULL handle: libpq could not even allocate the connection object.
            raise exceptions.OperationalError('PQconnectdb() failed')
        elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
            raise self._create_exception()
        # Register notice processor
        libpq.PQsetNoticeProcessor(
            self._pgconn, self._notice_callback, ffi.NULL)
        self.status = consts.STATUS_READY
        self._setup()
    def _connect_async(self):
        """Create an async connection.
        The connection will be completed banging on poll():
        First with self._conn_poll_connecting() that will finish connection,
        then with self._poll_setup_async() that will do the same job
        of self._setup().
        """
        self._pgconn = libpq.PQconnectStart(ascii_to_bytes(self.dsn))
        if not self._pgconn:
            # NULL handle: libpq could not allocate the connection object.
            raise exceptions.OperationalError('PQconnectStart() failed')
        elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
            raise self._create_exception()
        libpq.PQsetNoticeProcessor(
            self._pgconn, self._notice_callback, ffi.NULL)
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self._close()
    @check_closed
    def __enter__(self):
        """Context-manager entry; the connection must still be open."""
        return self
def __exit__(self, type, name, tb):
if type is None:
self.commit()
else:
self.rollback()
    def close(self):
        """Close the connection now; delegates to the internal _close()."""
        return self._close()
    # Not allowed when closed, async, or inside a two-phase transaction.
    @check_closed
    @check_async
    @check_tpc
    def rollback(self):
        """Roll back the current transaction."""
        self._rollback()
    # Not allowed when closed, async, or inside a two-phase transaction.
    @check_closed
    @check_async
    @check_tpc
    def commit(self):
        """Commit the current transaction."""
        self._commit()
    @check_closed
    @check_async
    def reset(self):
        """Reset the session to its default state and clear local flags."""
        with self._lock:
            self._execute_command(
                "ABORT; RESET ALL; SET SESSION AUTHORIZATION DEFAULT;")
            self.status = consts.STATUS_READY
            # _mark counts commits/rollbacks; bump it since session state changed.
            self._mark += 1
            self._autocommit = False
            self._tpc_xid = None
    def _get_guc(self, name):
        """Return the value of a configuration parameter."""
        with self._lock:
            query = 'SHOW %s' % name
            if _green_callback:
                pgres = self._execute_green(query)
            else:
                pgres = libpq.PQexec(self._pgconn, ascii_to_bytes(query))
            if not pgres or libpq.PQresultStatus(pgres) != libpq.PGRES_TUPLES_OK:
                # NOTE(review): pgres is not PQclear'd on this error path.
                raise exceptions.OperationalError("can't fetch %s" % name)
            rv = bytes_to_ascii(ffi.string(libpq.PQgetvalue(pgres, 0, 0)))
            # Free the libpq result to avoid leaking C memory.
            libpq.PQclear(pgres)
            return rv
    def _set_guc(self, name, value):
        """Set the value of a configuration parameter."""
        if value.lower() != 'default':
            value = util.quote_string(self, value)
        else:
            # Pass the DEFAULT keyword through unquoted.
            value = b'default'
        self._execute_command(ascii_to_bytes('SET %s TO ' % name) + value)
def _set_guc_onoff(self, name, value):
"""Set the value of a configuration parameter to a boolean.
The string 'default' is accepted too.
"""
if isinstance(value, six.string_types) and \
value.lower() in (b'default', 'default'):
value = 'default'
else:
value = 'on' if value else 'off'
self._set_guc(name, value)
    @property
    @check_closed
    def isolation_level(self):
        """Current isolation level, as one of the ISOLATION_LEVEL_* constants."""
        if self._autocommit:
            return consts.ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # Ask the server and translate its name back to a constant.
            name = self._get_guc('default_transaction_isolation')
            return _isolevels[name.lower()]
@check_async
def set_isolation_level(self, level):
if level < 0 or level > 4:
raise ValueError('isolation level must be between 0 and 4')
prev = self.isolation_level
if prev == level:
return
self._rollback()
if level == consts.ISOLATION_LEVEL_AUTOCOMMIT:
return self.set_session(autocommit=True)
else:
return self.set_session(isolation_level=level, autocommit=False)
    @check_closed
    @check_notrans
    def set_session(self, isolation_level=None, readonly=None, deferrable=None,
            autocommit=None):
        """Set session characteristics; only valid outside a transaction."""
        if isolation_level is not None:
            if isinstance(isolation_level, int):
                if isolation_level < 1 or isolation_level > 4:
                    raise ValueError('isolation level must be between 1 and 4')
                # Translate the numeric constant to the server-side name.
                isolation_level = _isolevels[isolation_level]
            elif isinstance(isolation_level, six.string_types):
                if isinstance(isolation_level, six.binary_type):
                    isolation_level = bytes_to_ascii(isolation_level)
                isolation_level = isolation_level.lower()
                if not isolation_level or isolation_level not in _isolevels:
                    raise ValueError("bad value for isolation level: '%s'" %
                        isolation_level)
            else:
                raise TypeError("bad isolation level: '%r'" % isolation_level)
            if self.server_version < 80000:
                # Pre-8.0 servers lack these two levels; map to the closest.
                if isolation_level == 'read uncommitted':
                    isolation_level = 'read committed'
                elif isolation_level == 'repeatable read':
                    isolation_level = 'serializable'
            self._set_guc("default_transaction_isolation", isolation_level)
        if readonly is not None:
            self._set_guc_onoff('default_transaction_read_only', readonly)
        if deferrable is not None:
            self._set_guc_onoff('default_transaction_deferrable', deferrable)
        if autocommit is not None:
            self._autocommit = bool(autocommit)
    @property
    def autocommit(self):
        """Whether the connection is in autocommit mode (no implicit BEGIN)."""
        return self._autocommit
    @autocommit.setter
    def autocommit(self, value):
        # Delegate to set_session() so the closed/no-transaction checks apply.
        self.set_session(autocommit=value)
    @property
    def async(self):
        # NOTE: 'async' became a reserved keyword in Python 3.7; this module
        # targets Python 2 / early Python 3 where it is a valid identifier.
        return self._async
    @check_closed
    def get_backend_pid(self):
        """Return the process ID of the backend server process."""
        return libpq.PQbackendPID(self._pgconn)
    def get_parameter_status(self, parameter):
        """Return a server run-time parameter value, or None if unknown."""
        p = libpq.PQparameterStatus(self._pgconn, ascii_to_bytes(parameter))
        return bytes_to_ascii(ffi.string(p)) if p != ffi.NULL else None
    def get_transaction_status(self):
        """Return the raw libpq transaction status of the connection."""
        return libpq.PQtransactionStatus(self._pgconn)
    def cursor(self, name=None, cursor_factory=None,
               withhold=False, scrollable=None):
        """Return a new cursor; a non-empty *name* makes it server-side (named)."""
        if cursor_factory is None:
            cursor_factory = self.cursor_factory or Cursor
        cur = cursor_factory(self, name)
        if not isinstance(cur, Cursor):
            raise TypeError(
                "cursor factory must be subclass of %s" %
                '.'.join([Cursor.__module__, Cursor.__name__]))
        if withhold:
            cur.withhold = withhold
        if scrollable is not None:
            cur.scrollable = scrollable
        if name and self._async:
            raise exceptions.ProgrammingError(
                "asynchronous connections cannot produce named cursors")
        # Remember the current transaction generation: the cursor becomes
        # stale once commit/rollback bumps the mark.
        cur._mark = self._mark
        return cur
    @check_closed
    @check_tpc
    def cancel(self):
        """Ask the server to abandon processing of the current command."""
        err_length = 256
        errbuf = ffi.new('char[]', err_length)
        # PQcancel returns 0 on failure and fills errbuf with the reason.
        if libpq.PQcancel(self._cancel, errbuf, err_length) == 0:
            raise exceptions.OperationalError(ffi.string(errbuf))
def isexecuting(self):
if not self._async:
return False
if self.status != consts.STATUS_READY:
return True
if self._async_cursor is not None:
return True
return False
    @property
    def encoding(self):
        """Client encoding (PostgreSQL name) of the connection."""
        return self._encoding
    @check_closed
    @check_async
    def set_client_encoding(self, encoding):
        """Set the client encoding for the current session."""
        encoding = _enc.normalize(encoding)
        if self.encoding == encoding:
            return
        # Resolve the Python codec first so an unknown name fails early.
        pyenc = _enc.encodings[encoding]
        self._rollback()
        self._set_guc('client_encoding', encoding)
        self._encoding = encoding
        self._py_enc = pyenc
    @property
    def notifies(self):
        """List of Notify objects received and not yet consumed."""
        return self._notifies
    @property
    @check_closed
    def protocol_version(self):
        """Frontend/backend protocol version in use (e.g. 3)."""
        return libpq.PQprotocolVersion(self._pgconn)
    @property
    @check_closed
    def server_version(self):
        """Server version as an integer (e.g. 90105 for 9.1.5)."""
        return libpq.PQserverVersion(self._pgconn)
    def fileno(self):
        """Return the file descriptor of the connection socket (for select/poll)."""
        return libpq.PQsocket(self._pgconn)
    @property
    def closed(self):
        # 0 = open, 1 = closed by close(), 2 = broken by a fatal error.
        return self._closed
    @check_closed
    @check_tpc_supported
    def xid(self, format_id, gtrid, bqual):
        """Return a new Xid to identify a two-phase (TPC) transaction."""
        return Xid(format_id, gtrid, bqual)
    @check_closed
    @check_async
    @check_tpc_supported
    def tpc_begin(self, xid):
        """Begin a two-phase transaction identified by *xid*."""
        if not isinstance(xid, Xid):
            # Accept a plain string and parse it into an Xid.
            xid = Xid.from_string(xid)
        if self.status != consts.STATUS_READY:
            raise exceptions.ProgrammingError(
                'tpc_begin must be called outside a transaction')
        if self._autocommit:
            raise exceptions.ProgrammingError(
                "tpc_begin can't be called in autocommit mode")
        self._begin_transaction()
        self._tpc_xid = xid
    @check_closed
    @check_async
    @check_tpc_supported
    def tpc_commit(self, xid=None):
        """Commit a prepared transaction (*xid*) or the current TPC transaction."""
        self._finish_tpc('COMMIT PREPARED', self._commit, xid)
    @check_closed
    @check_async
    @check_tpc_supported
    def tpc_rollback(self, xid=None):
        """Roll back a prepared transaction (*xid*) or the current TPC transaction."""
        self._finish_tpc('ROLLBACK PREPARED', self._rollback, xid)
    @check_closed
    @check_async
    def tpc_prepare(self):
        """Perform the first (prepare) phase of the current TPC transaction."""
        if not self._tpc_xid:
            raise exceptions.ProgrammingError(
                'prepare must be called inside a two-phase transaction')
        self._execute_tpc_command('PREPARE TRANSACTION', self._tpc_xid)
        self.status = consts.STATUS_PREPARED
    @check_closed
    @check_async
    @check_tpc_supported
    def tpc_recover(self):
        """Return pending Xids suitable for tpc_commit()/tpc_rollback()."""
        return Xid.tpc_recover(self)
    def lobject(self, oid=0, mode='', new_oid=0, new_file=None,
                lobject_factory=LargeObject):
        """Return a new large object wrapper bound to this connection."""
        obj = lobject_factory(self, oid, mode, new_oid, new_file)
        return obj
    def poll(self):
        """Advance an asynchronous connection/query; return a POLL_* state."""
        if self.status == consts.STATUS_SETUP:
            # First call after the async connect: start polling the socket.
            self.status = consts.STATUS_CONNECTING
            return consts.POLL_WRITE
        if self.status == consts.STATUS_CONNECTING:
            res = self._poll_connecting()
            if res == consts.POLL_OK and self._async:
                # Connection established: run the async session setup.
                return self._poll_setup_async()
            return res
        if self.status in (consts.STATUS_READY, consts.STATUS_BEGIN,
                           consts.STATUS_PREPARED):
            res = self._poll_query()
            if res == consts.POLL_OK and self._async and self._async_cursor:
                # Get the cursor object from the weakref
                curs = self._async_cursor()
                if curs is None:
                    util.pq_clear_async(self._pgconn)
                    raise exceptions.InterfaceError(
                        "the asynchronous cursor has disappeared")
                # Hand the final result over to the cursor.
                libpq.PQclear(curs._pgres)
                curs._pgres = util.pq_get_last_result(self._pgconn)
                try:
                    curs._pq_fetch()
                finally:
                    self._async_cursor = None
            return res
        return consts.POLL_ERROR
    def _poll_connecting(self):
        """Poll during a connection attempt until the connection has been
        established.
        """
        status_map = {
            libpq.PGRES_POLLING_OK: consts.POLL_OK,
            libpq.PGRES_POLLING_READING: consts.POLL_READ,
            libpq.PGRES_POLLING_WRITING: consts.POLL_WRITE,
            libpq.PGRES_POLLING_FAILED: consts.POLL_ERROR,
            libpq.PGRES_POLLING_ACTIVE: consts.POLL_ERROR,
        }
        res = status_map.get(libpq.PQconnectPoll(self._pgconn), None)
        if res is None:
            # Unknown polling state: treat as an error but don't raise.
            return consts.POLL_ERROR
        elif res == consts.POLL_ERROR:
            # FAILED/ACTIVE polling states map to a connection error.
            raise self._create_exception()
        return res
def _poll_query(self):
"""Poll the connection for the send query/retrieve result phase
Advance the async_status (usually going WRITE -> READ -> DONE) but
don't mess with the connection status.
"""
if self._async_status == consts.ASYNC_WRITE:
ret = self._poll_advance_write(libpq.PQflush(self._pgconn))
elif self._async_status == consts.ASYNC_READ:
if self._async:
ret = self._poll_advance_read(self._is_busy())
else:
ret = self._poll_advance_read(self._is_busy())
elif self._async_status == consts.ASYNC_DONE:
ret = self._poll_advance_read(self._is_busy())
else:
ret = consts.POLL_ERROR
return ret
    def _poll_advance_write(self, flush):
        """Advance to the next state after an attempt of flushing output"""
        if flush == 0:
            # Everything flushed: now wait for the result to arrive.
            self._async_status = consts.ASYNC_READ
            return consts.POLL_READ
        if flush == 1:
            # Output buffer not empty yet: keep waiting for writability.
            return consts.POLL_WRITE
        if flush == -1:
            raise self._create_exception()
        return consts.POLL_ERROR
def _poll_advance_read(self, busy):
"""Advance to the next state after a call to a _is_busy* method"""
if busy == 0:
self._async_status = consts.ASYNC_DONE
return consts.POLL_OK
if busy == 1:
return consts.POLL_READ
return consts.POLL_ERROR
def _poll_setup_async(self):
"""Advance to the next state during an async connection setup
If the connection is green, this is performed by the regular sync
code so the queries are sent by conn_setup() while in
CONN_STATUS_READY state.
"""
if self.status == consts.STATUS_CONNECTING:
util.pq_set_non_blocking(self._pgconn, 1, True)
self._equote = self._get_equote()
self._get_encoding()
self._have_cancel_key()
self._autocommit = True
# If the current datestyle is not compatible (not ISO) then
# force it to ISO
if not self._iso_compatible_datestyle():
self.status = consts.STATUS_DATESTYLE
if libpq.PQsendQuery(self._pgconn, b"SET DATESTYLE TO 'ISO'"):
self._async_status = consts.ASYNC_WRITE
return consts.POLL_WRITE
else:
raise self._create_exception()
self.status = consts.STATUS_READY
return consts.POLL_OK
if self.status == consts.STATUS_DATESTYLE:
res = self._poll_query()
if res != consts.POLL_OK:
return res
pgres = util.pq_get_last_result(self._pgconn)
if not pgres or \
libpq.PQresultStatus(pgres) != libpq.PGRES_COMMAND_OK:
raise exceptions.OperationalError("can't set datetyle to ISO")
libpq.PQclear(pgres)
self.status = consts.STATUS_READY
return consts.POLL_OK
return consts.POLL_ERROR
    def _setup(self):
        """Initialize session state right after a synchronous connect."""
        self._equote = self._get_equote()
        self._get_encoding()
        self._have_cancel_key()
        with self._lock:
            # If the current datestyle is not compatible (not ISO) then
            # force it to ISO
            if not self._iso_compatible_datestyle():
                self.status = consts.STATUS_DATESTYLE
                self._set_guc('datestyle', 'ISO')
            # Mark the connection usable only once setup succeeded.
            self._closed = 0
    def _have_cancel_key(self):
        """(Re)acquire the query-cancellation key for this connection."""
        if self._cancel != ffi.NULL:
            # Free any previously obtained cancel structure first.
            tmp, self._cancel = self._cancel, ffi.NULL
            libpq.PQfreeCancel(tmp)
        self._cancel = libpq.PQgetCancel(self._pgconn)
        if self._cancel == ffi.NULL:
            raise exceptions.OperationalError("can't get cancellation key")
    def _begin_transaction(self):
        """Issue BEGIN if the session is idle and not in autocommit mode."""
        if self.status == consts.STATUS_READY and not self._autocommit:
            self._execute_command('BEGIN')
            self.status = consts.STATUS_BEGIN
    def _execute_command(self, command):
        """Execute a command whose result is not returned to the caller.

        Raises the mapped exception if the command does not complete with
        PGRES_COMMAND_OK.
        """
        with self._lock:
            if _green_callback:
                pgres = self._execute_green(command)
            else:
                pgres = libpq.PQexec(self._pgconn, ascii_to_bytes(command))
            if not pgres:
                raise self._create_exception()
            try:
                pgstatus = libpq.PQresultStatus(pgres)
                if pgstatus != libpq.PGRES_COMMAND_OK:
                    exc = self._create_exception(pgres=pgres)
                    pgres = None  # ownership transferred to exc
                    raise exc
            finally:
                # Only clear the result if it was not handed to the exception.
                if pgres:
                    libpq.PQclear(pgres)
    def _execute_tpc_command(self, command, xid):
        """Run a TPC command ('PREPARE TRANSACTION', ...) on the given xid."""
        cmd = b' '.join([
            ascii_to_bytes(command),
            util.quote_string(self, str(xid))])
        self._execute_command(cmd)
        # Invalidate existing cursors: the transaction state changed.
        self._mark += 1
    def _execute_green(self, query):
        """Execute version for green threads"""
        if self._async_cursor:
            raise exceptions.ProgrammingError(
                "a single async query can be executed on the same connection")
        # Use the attribute as a busy flag while the query is in flight.
        self._async_cursor = True
        if not libpq.PQsendQuery(self._pgconn, ascii_to_bytes(query)):
            self._async_cursor = None
            return
        self._async_status = consts.ASYNC_WRITE
        try:
            # Yield to the green callback until the query completes.
            _green_callback(self)
        except Exception:
            # A failure in the callback leaves the connection unusable.
            self.close()
            raise
        else:
            return util.pq_get_last_result(self._pgconn)
        finally:
            self._async_cursor = None
            self._async_status = consts.ASYNC_DONE
    def _finish_tpc(self, command, fallback, xid):
        """Terminate a two-phase transaction.

        *command* is 'COMMIT PREPARED' or 'ROLLBACK PREPARED'; *fallback*
        (self._commit / self._rollback) is used when our own transaction
        has not been prepared yet.
        """
        if xid:
            # committing/aborting a received transaction.
            if self.status != consts.STATUS_READY:
                raise exceptions.ProgrammingError(
                    "tpc_commit/tpc_rollback with a xid "
                    "must be called outside a transaction")
            self._execute_tpc_command(command, xid)
        else:
            # committing/aborting our own transaction.
            if not self._tpc_xid:
                raise exceptions.ProgrammingError(
                    "tpc_commit/tpc_rollback with no parameter "
                    "must be called in a two-phase transaction")
            if self.status == consts.STATUS_BEGIN:
                # Not prepared yet: a plain commit/rollback is enough.
                fallback()
            elif self.status == consts.STATUS_PREPARED:
                self._execute_tpc_command(command, self._tpc_xid)
            else:
                raise exceptions.InterfaceError(
                    'unexpected state in tpc_commit/tpc_rollback')
            self.status = consts.STATUS_READY
            self._tpc_xid = None
    def _close(self):
        """Release libpq resources; safe to call more than once."""
        if self._closed == 1:
            return
        if self._cancel:
            libpq.PQfreeCancel(self._cancel)
            self._cancel = ffi.NULL
        if self._pgconn:
            libpq.PQfinish(self._pgconn)
            self._pgconn = None
        self._closed = 1
    def _commit(self):
        """Send COMMIT if a transaction is open; no-op in autocommit mode."""
        with self._lock:
            if self._autocommit or self.status != consts.STATUS_BEGIN:
                return
            self._mark += 1
            try:
                self._execute_command('COMMIT')
            finally:
                # The session is idle again even if COMMIT raised.
                self.status = consts.STATUS_READY
    def _rollback(self):
        """Send ROLLBACK if a transaction is open; no-op in autocommit mode."""
        with self._lock:
            if self._autocommit or self.status != consts.STATUS_BEGIN:
                return
            self._mark += 1
            try:
                self._execute_command('ROLLBACK')
            finally:
                # The session is idle again even if ROLLBACK raised.
                self.status = consts.STATUS_READY
    def _get_encoding(self):
        """Cache the connection encoding and the matching Python codec name."""
        client_encoding = self.get_parameter_status('client_encoding')
        self._encoding = _enc.normalize(client_encoding)
        self._py_enc = _enc.encodings[self._encoding]
def _get_equote(self):
ret = libpq.PQparameterStatus(
self._pgconn, b'standard_conforming_strings')
return ret and ffi.string(ret) == b'off' or False
    def _is_busy(self):
        """Consume available input; return nonzero while a result is not ready."""
        with self._lock:
            if libpq.PQconsumeInput(self._pgconn) == 0:
                raise exceptions.OperationalError(
                    ffi.string(libpq.PQerrorMessage(self._pgconn)))
            res = libpq.PQisBusy(self._pgconn)
            # NOTIFY payloads may have arrived with the input just consumed.
            self._process_notifies()
        return res
def _process_notice(self, arg, message):
"""Store the given message in `self.notices`
Also delete older entries to make sure there are no more then 50
entries in the list.
"""
self.notices.append(message)
length = len(self.notices)
if length > 50:
del self.notices[:length - 50]
    def _process_notifies(self):
        """Drain pending NOTIFY messages into self._notifies."""
        while True:
            pg_notify = libpq.PQnotifies(self._pgconn)
            if not pg_notify:
                break
            notify = Notify(
                pg_notify.be_pid,
                ffi.string(pg_notify.relname).decode(self._py_enc),
                ffi.string(pg_notify.extra).decode(self._py_enc))
            self._notifies.append(notify)
            # libpq allocated the structure; it must be freed explicitly.
            libpq.PQfreemem(pg_notify)
    def _create_exception(self, pgres=None, msg=None, cursor=None):
        """Return the appropriate exception instance for the current status.

        IMPORTANT: the new exception takes ownership of pgres: if pgres is
        passed as parameter, the callee must delete its pointer (e.g. it may
        be set to null). If there is a pgres in the cursor it is "stolen": the
        cursor will have it set to Null.
        """
        assert pgres is None or cursor is None, \
            "cannot specify pgres and cursor together"
        if cursor and cursor._pgres:
            # "Steal" the result from the cursor (see docstring).
            pgres, cursor._pgres = cursor._pgres, ffi.NULL
        exc_type = exceptions.OperationalError
        code = pgmsg = None
        # _py_enc can be not initialized yet in case of errors when
        # establishing the connection
        err_enc = self._py_enc or 'utf-8'
        # If no custom message is passed then get the message from postgres.
        # If pgres is available then we first try to get the message for the
        # last command, and then the error message for the connection
        if pgres:
            pgmsg = libpq.PQresultErrorMessage(pgres)
            pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \
                if pgmsg else None
            # Get the correct exception class based on the error code
            code = libpq.PQresultErrorField(pgres, libpq.LIBPQ_DIAG_SQLSTATE)
            if code != ffi.NULL:
                code = bytes_to_ascii(ffi.string(code))
                exc_type = util.get_exception_for_sqlstate(code)
            else:
                # No SQLSTATE available: fall back to a generic class.
                code = None
                exc_type = exceptions.DatabaseError
        if not pgmsg:
            pgmsg = libpq.PQerrorMessage(self._pgconn)
            pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \
                if pgmsg else None
        if msg is None and pgmsg:
            msg = pgmsg
            # Strip the severity prefix for a cleaner exception message.
            for prefix in ("ERROR:  ", "FATAL:  ", "PANIC:  "):
                if msg.startswith(prefix):
                    msg = msg[len(prefix):]
                    break
        # Clear the connection if the status is CONNECTION_BAD (fatal error)
        if self._pgconn and libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
            self._closed = 2
        exc = exc_type(msg)
        exc.pgcode = code
        exc.pgerror = pgmsg
        exc.cursor = cursor
        exc._pgres = pgres
        return exc
    def _have_wait_callback(self):
        """Return True if a green (coroutine) wait callback is installed."""
        return bool(_green_callback)
    def _iso_compatible_datestyle(self):
        ''' Return whether connection DateStyle is ISO-compatible
        '''
        datestyle = libpq.PQparameterStatus(self._pgconn, b'DateStyle')
        # Any style starting with 'ISO' (e.g. 'ISO, MDY') is acceptable.
        return datestyle != ffi.NULL and \
            ffi.string(datestyle).startswith(b'ISO')
def _connect(dsn, connection_factory=None, async=False):
    """Create a new connection from a DSN string.

    NOTE: 'async' is a reserved keyword since Python 3.7; this signature
    targets older interpreters and cannot be renamed without breaking
    callers.
    """
    if connection_factory is None:
        connection_factory = Connection
    # Mimic the construction method as used by psycopg2, which notes:
    # Here we are breaking the connection.__init__ interface defined
    # by psycopg2. So, if not requiring an async conn, avoid passing
    # the async parameter.
    if async:
        return connection_factory(dsn, async=True)
    else:
        return connection_factory(dsn)
| mit |
jupyter/dockerspawner | tests/test_systemuserspawner.py | 1 | 1246 | """Tests for SwarmSpawner"""
from getpass import getuser
import pytest
from jupyterhub.tests.mocking import public_url
from jupyterhub.tests.test_api import add_user
from jupyterhub.tests.test_api import api_request
from jupyterhub.utils import url_path_join
from tornado.httpclient import AsyncHTTPClient
from dockerspawner import SystemUserSpawner
# Mark all tests in this file as asyncio
pytestmark = pytest.mark.asyncio
async def test_start_stop(systemuserspawner_configured_app):
    """Start a SystemUserSpawner server via the REST API and check it serves."""
    app = systemuserspawner_configured_app
    name = getuser()
    add_user(app.db, app, name=name)
    user = app.users[name]
    assert isinstance(user.spawner, SystemUserSpawner)
    token = user.new_api_token()
    # start the server
    r = await api_request(app, "users", name, "server", method="post")
    while r.status_code == 202:
        # 202 means the spawn is still pending; request again
        r = await api_request(app, "users", name, "server", method="post")
    assert r.status_code == 201, r.text
    url = url_path_join(public_url(app, user), "api/status")
    resp = await AsyncHTTPClient().fetch(
        url, headers={"Authorization": "token %s" % token}
    )
    # The single-user server should answer directly (no redirect to login).
    assert resp.effective_url == url
    resp.rethrow()
    assert "kernels" in resp.body.decode("utf-8")
| bsd-3-clause |
aferr/TimingCompartments | src/dev/Platform.py | 20 | 1801 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class Platform(SimObject):
    """Abstract base SimObject for simulated hardware platforms."""
    type = 'Platform'
    abstract = True
    intrctrl = Param.IntrControl(Parent.any, "interrupt controller")
| bsd-3-clause |
guptaarchit/nacsniff | main.py | 1 | 9500 | import wx
import sys
from capture import *
import netifaces
import wx.lib.buttons
from sniffer_socket import *
from help_window import *
from scanner import *
class ListCtrlLeft(wx.Frame):
    """Left-hand panel with the Capture / Interface / Scanner bitmap buttons.

    NOTE(review): despite its name this is a wx.Frame (not a list control),
    it constructs itself with parent None, and it places its buttons on the
    *parent* argument instead -- confirm this is intended.
    """
    def __init__(self, parent, id):
        wx.Frame.__init__(self, None, wx.ID_ANY, 'wxBitmapButton', size=(300, 350))
        self.list1=[]
        capture_image = "images/capture.jpg"
        image1 = wx.Image(capture_image, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.capture_button = wx.BitmapButton(parent, id=-1, bitmap=image1,pos=(70, 70), size = (200, 100))
        self.capture_button.Bind(wx.EVT_BUTTON, self.capture)
        interface_image = "images/interface.jpg"
        image2 = wx.Image(interface_image, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.interface_button = wx.BitmapButton(parent, id=-1, bitmap=image2,pos=(70, 200), size = (200, 100))
        self.interface_button.Bind(wx.EVT_BUTTON, self.OnSelect)
        self.parent=parent
        scanner_image = "images/scanner.jpg"
        image2 = wx.Image(scanner_image, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        # NOTE(review): this rebinds self.interface_button, so the previous
        # button object is only reachable through the window hierarchy.
        self.interface_button = wx.BitmapButton(parent, id=-1, bitmap=image2,pos=(70, 330), size = (200, 100))
        self.interface_button.Bind(wx.EVT_BUTTON, self.OnScannerSelect)
        self.parent=parent
    def OnSize(self, event):
        # Stretch the single column to the parent's full width.
        size = self.parent.GetSize()
        self.SetColumnWidth(0, size.x-5)
        event.Skip()
    def OnScannerSelect(self, event):
        # Open the host-scanner window on the loopback interface.
        self.scan_initiater = scanner_window(None,-1, 'Online Hosts',"lo")
        self.scan_initiater.Show()
    def OnSelect(self, event):
        # Refresh the interface list shown in the right-hand panel.
        window = self.parent.GetGrandParent().FindWindowByName('interface_list')
        window.LoadData()
    def OnDeSelect(self, event):
        index = event.GetIndex()
        self.SetItemBackgroundColour(index, 'WHITE')
    def OnFocus(self, event):
        self.SetItemBackgroundColour(0, 'red')
    def capture(self, event):
        # Start a capture on whatever interface is currently selected.
        window = self.parent.GetGrandParent().FindWindowByName('interface_list')
        interface_selected=window = window.OnSelect1()
        print interface_selected
    def OnQuit(self, e): #to quit the program through menu item in file menu
        self.Close()
        # if 'scan_initiater' in locals():
        # NOTE(review): scan_initiater only exists after OnScannerSelect ran;
        # this line raises AttributeError otherwise -- confirm intent.
        self.scan_initiater.Close()
class interface_list(wx.ListCtrl):
    """Right-hand list control showing the available network interfaces."""
    def __init__(self, parent, id):
        wx.ListCtrl.__init__(self, parent, id, style=wx.LC_REPORT | wx.LC_HRULES | wx.LC_NO_HEADER | wx.LC_SINGLE_SEL)
        #self.listbox = wx.ListBox(parent, id)
        self.parent = parent
        self.interface_list=[]
        self.selection=""
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.selected_interface)
        self.InsertColumn(0, '')
    def selected_interface(self,event):
        # Items are inserted at position 0, so the visual order is the
        # reverse of self.interface_list; map the index back accordingly.
        index = event.GetIndex()
        self.selection=self.interface_list[len(self.interface_list)-index-1]
        print self.selection
    def OnSelect1(self):
        # Open the capture window for the selected interface, if any.
        if self.selection != "":
            self.capture_frame = Nacsnif(self,-1, 'nacsnif',self.selection)
            self.capture_frame.Show()
        else:
            pass #Add warning window
    def OnSize(self, event):
        # Stretch the single column to the parent's full width.
        size = self.parent.GetSize()
        self.SetColumnWidth(0, size.x-5)
        event.Skip()
    def LoadData(self):
        """Repopulate the control with the interfaces currently present."""
        self.DeleteAllItems()
        list2=netifaces.interfaces()
        for item in list2:
            self.InsertStringItem(0,item)
            self.interface_list.append(item)
    def OnQuit(self, e): #to quit the program through menu item in file menu
        self.Close()
        self.capture_frame.Close()
class Reader(wx.Frame):
    """Main application window.

    Left panel: capture controls (ListCtrlLeft); right panel: interface
    list (interface_list); plus menu bar, toolbar and status bar.
    """
    def __init__(self, parent, id, title):
        wx.Frame.__init__(self, parent, id, title, size=(700,600))
        disp_menu_bar(self)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        splitter = wx.SplitterWindow(self, -1, style=wx.SP_LIVE_UPDATE|wx.SP_NOBORDER)
        # Left side: "CAPTURE" header + button panel.
        vbox1 = wx.BoxSizer(wx.VERTICAL)
        panel1 = wx.Panel(splitter, -1)
        panel1.SetBackgroundColour('blue')
        panel11 = wx.Panel(panel1, -1, size=(-1, 240))
        panel11.SetBackgroundColour('BLUE')
        st1 = wx.StaticText(panel11, -1, 'CAPTURE', (135, 5))
        st1.SetForegroundColour('WHITE')
        panel12 = wx.Panel(panel1, -1, style=wx.BORDER_SUNKEN)
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.list1 = ListCtrlLeft(panel12, -1)
        vbox.Add(self.list1, 1, wx.EXPAND)
        panel12.SetSizer(vbox)
        vbox1.Add(panel11, 0, wx.EXPAND)
        vbox1.Add(panel12, 1, wx.EXPAND)
        panel1.SetSizer(vbox1)
        # Right side: "INTERFACES" header + interface list.
        vbox2 = wx.BoxSizer(wx.VERTICAL)
        panel2 = wx.Panel(splitter, -1)
        panel21 = wx.Panel(panel2, -1, size=(-1, 40), style=wx.NO_BORDER)
        st2 = wx.StaticText(panel21, -1, 'INTERFACES', (135, 5))
        st2.SetForegroundColour('WHITE')
        panel21.SetBackgroundColour('BLUE')
        panel22 = wx.Panel(panel2, -1, style=wx.BORDER_RAISED)
        vbox3 = wx.BoxSizer(wx.VERTICAL)
        self.list2 = interface_list(panel22, -1)
        # The name is looked up by ListCtrlLeft via FindWindowByName.
        self.list2.SetName('interface_list')
        vbox3.Add(self.list2, 1, wx.EXPAND)
        panel22.SetSizer(vbox3)
        panel22.SetBackgroundColour('WHITE')
        vbox2.Add(panel21, 0, wx.EXPAND)
        vbox2.Add(panel22, 1, wx.EXPAND)
        panel2.SetSizer(vbox2)
        self.toolbar_icons()
        hbox.Add(splitter, 1, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
        self.SetSizer(hbox)
        self.CreateStatusBar()
        splitter.SplitVertically(panel1, panel2)
        self.Centre()
        self.Show(True)
    def toolbar_icons(self):
        """Build the toolbar and bind its handlers."""
        # BUG FIX: the toolbar used to be stored only in a local variable,
        # so OnUndo/OnRedo crashed with AttributeError on self.toolbar.
        self.toolbar = self.CreateToolBar()
        self.count = 5
        self.toolbar.AddLabelTool(1, 'Exit', wx.Bitmap('images/close.png'))
        self.toolbar.AddLabelTool(2, 'help', wx.Bitmap('images/help.png'))
        self.toolbar.AddLabelTool(3, 'open', wx.Bitmap('images/open.png'))
        self.toolbar.AddLabelTool(4, 'start', wx.Bitmap('images/start.png'))
        self.toolbar.AddLabelTool(5, 'list', wx.Bitmap('images/list.png'))
        self.toolbar.AddLabelTool(6, 'pause', wx.Bitmap('images/pause.png'))
        self.toolbar.AddLabelTool(7, 'Restart', wx.Bitmap('images/restart.png'))
        self.toolbar.AddLabelTool(8, 'stop', wx.Bitmap('images/stop.png'))
        self.toolbar.AddLabelTool(9, 'undo', wx.Bitmap('images/undo.png'))
        self.toolbar.AddLabelTool(10, 'redo', wx.Bitmap('images/redo.png'))
        self.toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.OnQuit, id=1)
        self.Bind(wx.EVT_TOOL, self.OnUndo, id=9)
        self.Bind(wx.EVT_TOOL, self.OnRedo, id=10)
        self.Bind(wx.EVT_TOOL, self.OnHelp, id=2)
    def OnQuit(self, e):  # quit the program through menu item in file menu
        # Close child panels first, then the frame itself.
        self.list1.Close()
        self.list2.Close()
        self.Close()
    def OnHelp(self, e):
        self.l = HelpWindow(None, -1, 'HelpWindow')
    def OnUndo(self, e):
        if self.count > 1 and self.count <= 5:
            self.count = self.count - 1
        if self.count == 1:
            # BUG FIX: the undo/redo tools were added with ids 9 and 10,
            # not wx.ID_UNDO/wx.ID_REDO; use the actual tool ids.
            self.toolbar.EnableTool(9, False)
        if self.count == 4:
            self.toolbar.EnableTool(10, True)
    def OnRedo(self, e):
        if self.count < 5 and self.count >= 1:
            self.count = self.count + 1
        if self.count == 5:
            self.toolbar.EnableTool(10, False)
        if self.count == 2:
            self.toolbar.EnableTool(9, True)
    def OnSave(self, e):
        pass
    def OnOpen(self, e):
        pass
def disp_menu_bar(tempo):
    """Attach the File/Edit/Go/Help menu bar to frame *tempo* and bind handlers."""
    menubar = wx.MenuBar()
    fileMenu = wx.Menu()
    new_item = fileMenu.Append(wx.ID_NEW, 'New', 'New application')
    open_item = fileMenu.Append(wx.ID_OPEN, 'Open', 'Open application')
    save_as_item = fileMenu.Append(wx.ID_SAVE, 'Save', 'Save application')
    exit_item = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
    menubar.Append(fileMenu, '&File')
    edit_menu = wx.Menu()
    copy_item = edit_menu.Append(wx.ID_COPY, 'Copy', 'Copy application')
    find_packet_item = edit_menu.Append(wx.ID_ANY, 'Find Packet', 'Find packet application')
    find_next_item = edit_menu.Append(wx.ID_ANY, 'Find Next', 'Finding next packet application')
    find_previous_item = edit_menu.Append(wx.ID_ANY, 'Find Previous', 'finding Previous packet application')
    menubar.Append(edit_menu, '&Edit')
    go_menu = wx.Menu()
    back = go_menu.Append(wx.ID_ANY, 'Back', 'back application')
    forward = go_menu.Append(wx.ID_ANY, 'Forward', 'forward application')
    go_to_packet = go_menu.Append(wx.ID_ANY, 'Go to Packet', 'go to packet application')
    go_to_corresponding_packet = go_menu.Append(wx.ID_ANY, 'Go to corresponding Packet', 'go to corrsponding packet application')
    menubar.Append(go_menu, '&Go')
    help_menu=wx.Menu()
    Help=help_menu.Append(wx.ID_ANY,'Help','about application')
    menubar.Append(help_menu,'&Help')
    tempo.SetMenuBar(menubar)
    # Bind menu items to the frame's handler methods.
    tempo.Bind(wx.EVT_MENU, tempo.OnQuit, exit_item)
    tempo.Bind(wx.EVT_MENU,tempo.OnHelp,Help)
    tempo.Bind(wx.EVT_MENU,tempo.OnSave,save_as_item)
    #tempo.Bind(wx.EVT_MENU,tempo.OnOpen,open_item)
    # wx.EVT_MENU(tempo,101,tempo.OnSave )
# Entry point: create the main window and start the wx event loop.
app = wx.App()
#app.setStyle('cleanlooks')
#sys.stderr=open("test.txt")
Reader(None, -1, 'NACSNIFF')
app.MainLoop()
idem2lyon/persomov | libs/enzyme/fourcc.py | 163 | 31535 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import string
import re
import struct
__all__ = ['resolve']
def resolve(code):
    """
    Translate a twocc or fourcc code into a human-readable codec name.

    Returns a 2-tuple ``(cc, codec)`` where both elements are strings:
    ``cc`` has the form ``'0xXX'`` for a twocc, or ``'ABCD'`` for a fourcc.
    Unrecognised codes yield ``(None, u'Unknown')``, except that an
    unrecognised but printable string is echoed back as the codec name.
    """
    if isinstance(code, basestring):
        name = u'Unknown'
        # --- twocc string forms: each match returns immediately ----------
        if re.match(r'^0x[\da-f]{1,4}$', code, re.I):
            # Hexadecimal twocc, e.g. '0x55'.
            return code, TWOCC.get(int(code, 16), name)
        if code.isdigit() and 0 <= int(code) <= 0xff:
            # Decimal twocc, e.g. '85'.
            return hex(int(code)), TWOCC.get(int(code), name)
        if len(code) == 2:
            # Raw two-byte twocc: unpack as a native-order unsigned short.
            packed = struct.unpack('H', code)[0]
            return hex(packed), TWOCC.get(packed, name)
        # --- fourcc / printable fallbacks --------------------------------
        if len(code) != 4 and all(x in string.printable for x in code):
            # Not a fourcc, but printable: report the string itself.
            name = unicode(code)
        if code[:2] == 'MS' and code[2:].upper() in FOURCC:
            # Drop the Microsoft 'MS' prefix when the remainder is known.
            code = code[2:]
        if code.upper() in FOURCC:
            return code.upper(), unicode(FOURCC[code.upper()])
        return None, name
    if isinstance(code, (int, long)):
        # Bare integer: treat it as a twocc value.
        return hex(code), TWOCC.get(code, u'Unknown')
    return None, u'Unknown'
# Mapping of twocc audio format tags (WAVE wFormatTag integer values) to
# human-readable codec names.  Consulted by resolve() for twocc lookups.
TWOCC = {
    0x0000: 'Unknown Wave Format',
    0x0001: 'PCM',
    0x0002: 'Microsoft ADPCM',
    0x0003: 'IEEE Float',
    0x0004: 'Compaq Computer VSELP',
    0x0005: 'IBM CVSD',
    0x0006: 'A-Law',
    0x0007: 'mu-Law',
    0x0008: 'Microsoft DTS',
    0x0009: 'Microsoft DRM',
    0x0010: 'OKI ADPCM',
    0x0011: 'Intel DVI/IMA ADPCM',
    0x0012: 'Videologic MediaSpace ADPCM',
    0x0013: 'Sierra Semiconductor ADPCM',
    0x0014: 'Antex Electronics G.723 ADPCM',
    0x0015: 'DSP Solutions DigiSTD',
    0x0016: 'DSP Solutions DigiFIX',
    0x0017: 'Dialogic OKI ADPCM',
    0x0018: 'MediaVision ADPCM',
    0x0019: 'Hewlett-Packard CU',
    0x0020: 'Yamaha ADPCM',
    0x0021: 'Speech Compression Sonarc',
    0x0022: 'DSP Group TrueSpeech',
    0x0023: 'Echo Speech EchoSC1',
    0x0024: 'Audiofile AF36',
    0x0025: 'Audio Processing Technology APTX',
    0x0026: 'AudioFile AF10',
    0x0027: 'Prosody 1612',
    0x0028: 'LRC',
    0x0030: 'Dolby AC2',
    0x0031: 'Microsoft GSM 6.10',
    0x0032: 'MSNAudio',
    0x0033: 'Antex Electronics ADPCME',
    0x0034: 'Control Resources VQLPC',
    0x0035: 'DSP Solutions DigiREAL',
    0x0036: 'DSP Solutions DigiADPCM',
    0x0037: 'Control Resources CR10',
    0x0038: 'Natural MicroSystems VBXADPCM',
    0x0039: 'Crystal Semiconductor IMA ADPCM',
    0x003A: 'EchoSC3',
    0x003B: 'Rockwell ADPCM',
    0x003C: 'Rockwell Digit LK',
    0x003D: 'Xebec',
    0x0040: 'Antex Electronics G.721 ADPCM',
    0x0041: 'G.728 CELP',
    0x0042: 'MSG723',
    0x0043: 'IBM AVC ADPCM',
    0x0045: 'ITU-T G.726 ADPCM',
    0x0050: 'MPEG 1, Layer 1,2',
    0x0052: 'RT24',
    0x0053: 'PAC',
    0x0055: 'MPEG Layer 3',
    0x0059: 'Lucent G.723',
    0x0060: 'Cirrus',
    0x0061: 'ESPCM',
    0x0062: 'Voxware',
    0x0063: 'Canopus Atrac',
    0x0064: 'G.726 ADPCM',
    0x0065: 'G.722 ADPCM',
    0x0066: 'DSAT',
    0x0067: 'DSAT Display',
    0x0069: 'Voxware Byte Aligned',
    0x0070: 'Voxware AC8',
    0x0071: 'Voxware AC10',
    0x0072: 'Voxware AC16',
    0x0073: 'Voxware AC20',
    0x0074: 'Voxware MetaVoice',
    0x0075: 'Voxware MetaSound',
    0x0076: 'Voxware RT29HW',
    0x0077: 'Voxware VR12',
    0x0078: 'Voxware VR18',
    0x0079: 'Voxware TQ40',
    0x0080: 'Softsound',
    0x0081: 'Voxware TQ60',
    0x0082: 'MSRT24',
    0x0083: 'G.729A',
    0x0084: 'MVI MV12',
    0x0085: 'DF G.726',
    0x0086: 'DF GSM610',
    0x0088: 'ISIAudio',
    0x0089: 'Onlive',
    0x0091: 'SBC24',
    0x0092: 'Dolby AC3 SPDIF',
    0x0093: 'MediaSonic G.723',
    0x0094: 'Aculab PLC Prosody 8KBPS',
    0x0097: 'ZyXEL ADPCM',
    0x0098: 'Philips LPCBB',
    0x0099: 'Packed',
    0x00A0: 'Malden Electronics PHONYTALK',
    0x00FF: 'AAC',
    0x0100: 'Rhetorex ADPCM',
    0x0101: 'IBM mu-law',
    0x0102: 'IBM A-law',
    0x0103: 'IBM AVC Adaptive Differential Pulse Code Modulation',
    0x0111: 'Vivo G.723',
    0x0112: 'Vivo Siren',
    0x0123: 'Digital G.723',
    0x0125: 'Sanyo LD ADPCM',
    0x0130: 'Sipro Lab Telecom ACELP.net',
    0x0131: 'Sipro Lab Telecom ACELP.4800',
    0x0132: 'Sipro Lab Telecom ACELP.8V3',
    0x0133: 'Sipro Lab Telecom ACELP.G.729',
    0x0134: 'Sipro Lab Telecom ACELP.G.729A',
    0x0135: 'Sipro Lab Telecom ACELP.KELVIN',
    0x0140: 'Windows Media Video V8',
    0x0150: 'Qualcomm PureVoice',
    0x0151: 'Qualcomm HalfRate',
    0x0155: 'Ring Zero Systems TUB GSM',
    0x0160: 'Windows Media Audio V1 / DivX audio (WMA)',
    0x0161: 'Windows Media Audio V7 / V8 / V9',
    0x0162: 'Windows Media Audio Professional V9',
    0x0163: 'Windows Media Audio Lossless V9',
    0x0170: 'UNISYS NAP ADPCM',
    0x0171: 'UNISYS NAP ULAW',
    0x0172: 'UNISYS NAP ALAW',
    0x0173: 'UNISYS NAP 16K',
    0x0200: 'Creative Labs ADPCM',
    0x0202: 'Creative Labs Fastspeech8',
    0x0203: 'Creative Labs Fastspeech10',
    0x0210: 'UHER Informatic ADPCM',
    0x0215: 'Ulead DV ACM',
    0x0216: 'Ulead DV ACM',
    0x0220: 'Quarterdeck',
    0x0230: 'I-link Worldwide ILINK VC',
    0x0240: 'Aureal Semiconductor RAW SPORT',
    0x0241: 'ESST AC3',
    0x0250: 'Interactive Products HSX',
    0x0251: 'Interactive Products RPELP',
    0x0260: 'Consistent Software CS2',
    0x0270: 'Sony ATRAC3 (SCX, same as MiniDisk LP2)',
    0x0300: 'Fujitsu FM Towns Snd',
    0x0400: 'BTV Digital',
    0x0401: 'Intel Music Coder (IMC)',
    0x0402: 'Ligos Indeo Audio',
    0x0450: 'QDesign Music',
    0x0680: 'VME VMPCM',
    0x0681: 'AT&T Labs TPC',
    0x0700: 'YMPEG Alpha',
    0x08AE: 'ClearJump LiteWave',
    0x1000: 'Olivetti GSM',
    0x1001: 'Olivetti ADPCM',
    0x1002: 'Olivetti CELP',
    0x1003: 'Olivetti SBC',
    0x1004: 'Olivetti OPR',
    0x1100: 'Lernout & Hauspie LH Codec',
    0x1101: 'Lernout & Hauspie CELP codec',
    0x1102: 'Lernout & Hauspie SBC codec',
    0x1103: 'Lernout & Hauspie SBC codec',
    0x1104: 'Lernout & Hauspie SBC codec',
    0x1400: 'Norris',
    0x1401: 'AT&T ISIAudio',
    0x1500: 'Soundspace Music Compression',
    0x181C: 'VoxWare RT24 speech codec',
    0x181E: 'Lucent elemedia AX24000P Music codec',
    0x1C07: 'Lucent SX8300P speech codec',
    0x1C0C: 'Lucent SX5363S G.723 compliant codec',
    0x1F03: 'CUseeMe DigiTalk (ex-Rocwell)',
    0x1FC4: 'NCT Soft ALF2CD ACM',
    0x2000: 'AC3',
    0x2001: 'Dolby DTS (Digital Theater System)',
    0x2002: 'RealAudio 1 / 2 14.4',
    0x2003: 'RealAudio 1 / 2 28.8',
    0x2004: 'RealAudio G2 / 8 Cook (low bitrate)',
    0x2005: 'RealAudio 3 / 4 / 5 Music (DNET)',
    0x2006: 'RealAudio 10 AAC (RAAC)',
    0x2007: 'RealAudio 10 AAC+ (RACP)',
    0x3313: 'makeAVIS',
    0x4143: 'Divio MPEG-4 AAC audio',
    0x434C: 'LEAD Speech',
    0x564C: 'LEAD Vorbis',
    0x674F: 'Ogg Vorbis (mode 1)',
    0x6750: 'Ogg Vorbis (mode 2)',
    0x6751: 'Ogg Vorbis (mode 3)',
    0x676F: 'Ogg Vorbis (mode 1+)',
    0x6770: 'Ogg Vorbis (mode 2+)',
    0x6771: 'Ogg Vorbis (mode 3+)',
    0x7A21: 'GSM-AMR (CBR, no SID)',
    0x7A22: 'GSM-AMR (VBR, including SID)',
    0xDFAC: 'DebugMode SonicFoundry Vegas FrameServer ACM Codec',
    0xF1AC: 'Free Lossless Audio Codec FLAC',
    0xFFFE: 'Extensible wave format',
    0xFFFF: 'development'
}
FOURCC = {
'1978': 'A.M.Paredes predictor (LossLess)',
'2VUY': 'Optibase VideoPump 8-bit 4:2:2 Component YCbCr',
'3IV0': 'MPEG4-based codec 3ivx',
'3IV1': '3ivx v1',
'3IV2': '3ivx v2',
'3IVD': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'3IVX': 'MPEG4-based codec 3ivx',
'8BPS': 'Apple QuickTime Planar RGB with Alpha-channel',
'AAS4': 'Autodesk Animator codec (RLE)',
'AASC': 'Autodesk Animator',
'ABYR': 'Kensington ABYR',
'ACTL': 'Streambox ACT-L2',
'ADV1': 'Loronix WaveCodec',
'ADVJ': 'Avid M-JPEG Avid Technology Also known as AVRn',
'AEIK': 'Intel Indeo Video 3.2',
'AEMI': 'Array VideoONE MPEG1-I Capture',
'AFLC': 'Autodesk Animator FLC',
'AFLI': 'Autodesk Animator FLI',
'AHDV': 'CineForm 10-bit Visually Perfect HD',
'AJPG': '22fps JPEG-based codec for digital cameras',
'AMPG': 'Array VideoONE MPEG',
'ANIM': 'Intel RDX (ANIM)',
'AP41': 'AngelPotion Definitive',
'AP42': 'AngelPotion Definitive',
'ASLC': 'AlparySoft Lossless Codec',
'ASV1': 'Asus Video v1',
'ASV2': 'Asus Video v2',
'ASVX': 'Asus Video 2.0 (audio)',
'ATM4': 'Ahead Nero Digital MPEG-4 Codec',
'AUR2': 'Aura 2 Codec - YUV 4:2:2',
'AURA': 'Aura 1 Codec - YUV 4:1:1',
'AV1X': 'Avid 1:1x (Quick Time)',
'AVC1': 'H.264 AVC',
'AVD1': 'Avid DV (Quick Time)',
'AVDJ': 'Avid Meridien JFIF with Alpha-channel',
'AVDN': 'Avid DNxHD (Quick Time)',
'AVDV': 'Avid DV',
'AVI1': 'MainConcept Motion JPEG Codec',
'AVI2': 'MainConcept Motion JPEG Codec',
'AVID': 'Avid Motion JPEG',
'AVIS': 'Wrapper for AviSynth',
'AVMP': 'Avid IMX (Quick Time)',
'AVR ': 'Avid ABVB/NuVista MJPEG with Alpha-channel',
'AVRN': 'Avid Motion JPEG',
'AVUI': 'Avid Meridien Uncompressed with Alpha-channel',
'AVUP': 'Avid 10bit Packed (Quick Time)',
'AYUV': '4:4:4 YUV (AYUV)',
'AZPR': 'Quicktime Apple Video',
'AZRP': 'Quicktime Apple Video',
'BGR ': 'Uncompressed BGR32 8:8:8:8',
'BGR(15)': 'Uncompressed BGR15 5:5:5',
'BGR(16)': 'Uncompressed BGR16 5:6:5',
'BGR(24)': 'Uncompressed BGR24 8:8:8',
'BHIV': 'BeHere iVideo',
'BINK': 'RAD Game Tools Bink Video',
'BIT ': 'BI_BITFIELDS (Raw RGB)',
'BITM': 'Microsoft H.261',
'BLOX': 'Jan Jezabek BLOX MPEG Codec',
'BLZ0': 'DivX for Blizzard Decoder Filter',
'BT20': 'Conexant Prosumer Video',
'BTCV': 'Conexant Composite Video Codec',
'BTVC': 'Conexant Composite Video',
'BW00': 'BergWave (Wavelet)',
'BW10': 'Data Translation Broadway MPEG Capture',
'BXBG': 'BOXX BGR',
'BXRG': 'BOXX RGB',
'BXY2': 'BOXX 10-bit YUV',
'BXYV': 'BOXX YUV',
'CC12': 'Intel YUV12',
'CDV5': 'Canopus SD50/DVHD',
'CDVC': 'Canopus DV',
'CDVH': 'Canopus SD50/DVHD',
'CFCC': 'Digital Processing Systems DPS Perception',
'CFHD': 'CineForm 10-bit Visually Perfect HD',
'CGDI': 'Microsoft Office 97 Camcorder Video',
'CHAM': 'Winnov Caviara Champagne',
'CJPG': 'Creative WebCam JPEG',
'CLJR': 'Cirrus Logic YUV 4 pixels',
'CLLC': 'Canopus LossLess',
'CLPL': 'YV12',
'CMYK': 'Common Data Format in Printing',
'COL0': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'COL1': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'CPLA': 'Weitek 4:2:0 YUV Planar',
'CRAM': 'Microsoft Video 1 (CRAM)',
'CSCD': 'RenderSoft CamStudio lossless Codec',
'CTRX': 'Citrix Scalable Video Codec',
'CUVC': 'Canopus HQ',
'CVID': 'Radius Cinepak',
'CWLT': 'Microsoft Color WLT DIB',
'CYUV': 'Creative Labs YUV',
'CYUY': 'ATI YUV',
'D261': 'H.261',
'D263': 'H.263',
'DAVC': 'Dicas MPEGable H.264/MPEG-4 AVC base profile codec',
'DC25': 'MainConcept ProDV Codec',
'DCAP': 'Pinnacle DV25 Codec',
'DCL1': 'Data Connection Conferencing Codec',
'DCT0': 'WniWni Codec',
'DFSC': 'DebugMode FrameServer VFW Codec',
'DIB ': 'Full Frames (Uncompressed)',
'DIV1': 'FFmpeg-4 V1 (hacked MS MPEG-4 V1)',
'DIV2': 'MS MPEG-4 V2',
'DIV3': 'DivX v3 MPEG-4 Low-Motion',
'DIV4': 'DivX v3 MPEG-4 Fast-Motion',
'DIV5': 'DIV5',
'DIV6': 'DivX MPEG-4',
'DIVX': 'DivX',
'DM4V': 'Dicas MPEGable MPEG-4',
'DMB1': 'Matrox Rainbow Runner hardware MJPEG',
'DMB2': 'Paradigm MJPEG',
'DMK2': 'ViewSonic V36 PDA Video',
'DP02': 'DynaPel MPEG-4',
'DPS0': 'DPS Reality Motion JPEG',
'DPSC': 'DPS PAR Motion JPEG',
'DRWX': 'Pinnacle DV25 Codec',
'DSVD': 'DSVD',
'DTMT': 'Media-100 Codec',
'DTNT': 'Media-100 Codec',
'DUCK': 'Duck True Motion 1.0',
'DV10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DV25': 'Matrox DVCPRO codec',
'DV50': 'Matrox DVCPRO50 codec',
'DVAN': 'DVAN',
'DVC ': 'Apple QuickTime DV (DVCPRO NTSC)',
'DVCP': 'Apple QuickTime DV (DVCPRO PAL)',
'DVCS': 'MainConcept DV Codec',
'DVE2': 'InSoft DVE-2 Videoconferencing',
'DVH1': 'Pinnacle DVHD100',
'DVHD': 'DV 1125 lines at 30.00 Hz or 1250 lines at 25.00 Hz',
'DVIS': 'VSYNC DualMoon Iris DV codec',
'DVL ': 'Radius SoftDV 16:9 NTSC',
'DVLP': 'Radius SoftDV 16:9 PAL',
'DVMA': 'Darim Vision DVMPEG',
'DVOR': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DVPN': 'Apple QuickTime DV (DV NTSC)',
'DVPP': 'Apple QuickTime DV (DV PAL)',
'DVR1': 'TARGA2000 Codec',
'DVRS': 'VSYNC DualMoon Iris DV codec',
'DVSD': 'DV',
'DVSL': 'DV compressed in SD (SDL)',
'DVX1': 'DVX1000SP Video Decoder',
'DVX2': 'DVX2000S Video Decoder',
'DVX3': 'DVX3000S Video Decoder',
'DX50': 'DivX v5',
'DXGM': 'Electronic Arts Game Video codec',
'DXSB': 'DivX Subtitles Codec',
'DXT1': 'Microsoft DirectX Compressed Texture (DXT1)',
'DXT2': 'Microsoft DirectX Compressed Texture (DXT2)',
'DXT3': 'Microsoft DirectX Compressed Texture (DXT3)',
'DXT4': 'Microsoft DirectX Compressed Texture (DXT4)',
'DXT5': 'Microsoft DirectX Compressed Texture (DXT5)',
'DXTC': 'Microsoft DirectX Compressed Texture (DXTC)',
'DXTN': 'Microsoft DirectX Compressed Texture (DXTn)',
'EKQ0': 'Elsa EKQ0',
'ELK0': 'Elsa ELK0',
'EM2V': 'Etymonix MPEG-2 I-frame',
'EQK0': 'Elsa graphics card quick codec',
'ESCP': 'Eidos Escape',
'ETV1': 'eTreppid Video ETV1',
'ETV2': 'eTreppid Video ETV2',
'ETVC': 'eTreppid Video ETVC',
'FFDS': 'FFDShow supported',
'FFV1': 'FFDShow supported',
'FFVH': 'FFVH codec',
'FLIC': 'Autodesk FLI/FLC Animation',
'FLJP': 'D-Vision Field Encoded Motion JPEG',
'FLV1': 'FLV1 codec',
'FMJP': 'D-Vision fieldbased ISO MJPEG',
'FRLE': 'SoftLab-NSK Y16 + Alpha RLE',
'FRWA': 'SoftLab-Nsk Forward Motion JPEG w/ alpha channel',
'FRWD': 'SoftLab-Nsk Forward Motion JPEG',
'FRWT': 'SoftLab-NSK Vision Forward Motion JPEG with Alpha-channel',
'FRWU': 'SoftLab-NSK Vision Forward Uncompressed',
'FVF1': 'Iterated Systems Fractal Video Frame',
'FVFW': 'ff MPEG-4 based on XviD codec',
'GEPJ': 'White Pine (ex Paradigm Matrix) Motion JPEG Codec',
'GJPG': 'Grand Tech GT891x Codec',
'GLCC': 'GigaLink AV Capture codec',
'GLZW': 'Motion LZW',
'GPEG': 'Motion JPEG',
'GPJM': 'Pinnacle ReelTime MJPEG Codec',
'GREY': 'Apparently a duplicate of Y800',
'GWLT': 'Microsoft Greyscale WLT DIB',
'H260': 'H.260',
'H261': 'H.261',
'H262': 'H.262',
'H263': 'H.263',
'H264': 'H.264 AVC',
'H265': 'H.265',
'H266': 'H.266',
'H267': 'H.267',
'H268': 'H.268',
'H269': 'H.269',
'HD10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'HDX4': 'Jomigo HDX4',
'HFYU': 'Huffman Lossless Codec',
'HMCR': 'Rendition Motion Compensation Format (HMCR)',
'HMRR': 'Rendition Motion Compensation Format (HMRR)',
'I263': 'Intel ITU H.263 Videoconferencing (i263)',
'I420': 'Intel Indeo 4',
'IAN ': 'Intel RDX',
'ICLB': 'InSoft CellB Videoconferencing',
'IDM0': 'IDM Motion Wavelets 2.0',
'IF09': 'Microsoft H.261',
'IGOR': 'Power DVD',
'IJPG': 'Intergraph JPEG',
'ILVC': 'Intel Layered Video',
'ILVR': 'ITU-T H.263+',
'IMC1': 'IMC1',
'IMC2': 'IMC2',
'IMC3': 'IMC3',
'IMC4': 'IMC4',
'IMJG': 'Accom SphereOUS MJPEG with Alpha-channel',
'IPDV': 'I-O Data Device Giga AVI DV Codec',
'IPJ2': 'Image Power JPEG2000',
'IR21': 'Intel Indeo 2.1',
'IRAW': 'Intel YUV Uncompressed',
'IUYV': 'Interlaced version of UYVY (line order 0,2,4 then 1,3,5 etc)',
'IV30': 'Ligos Indeo 3.0',
'IV31': 'Ligos Indeo 3.1',
'IV32': 'Ligos Indeo 3.2',
'IV33': 'Ligos Indeo 3.3',
'IV34': 'Ligos Indeo 3.4',
'IV35': 'Ligos Indeo 3.5',
'IV36': 'Ligos Indeo 3.6',
'IV37': 'Ligos Indeo 3.7',
'IV38': 'Ligos Indeo 3.8',
'IV39': 'Ligos Indeo 3.9',
'IV40': 'Ligos Indeo Interactive 4.0',
'IV41': 'Ligos Indeo Interactive 4.1',
'IV42': 'Ligos Indeo Interactive 4.2',
'IV43': 'Ligos Indeo Interactive 4.3',
'IV44': 'Ligos Indeo Interactive 4.4',
'IV45': 'Ligos Indeo Interactive 4.5',
'IV46': 'Ligos Indeo Interactive 4.6',
'IV47': 'Ligos Indeo Interactive 4.7',
'IV48': 'Ligos Indeo Interactive 4.8',
'IV49': 'Ligos Indeo Interactive 4.9',
'IV50': 'Ligos Indeo Interactive 5.0',
'IY41': 'Interlaced version of Y41P (line order 0,2,4,...,1,3,5...)',
'IYU1': '12 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYU2': '24 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYUV': 'Intel Indeo iYUV 4:2:0',
'JBYR': 'Kensington JBYR',
'JFIF': 'Motion JPEG (FFmpeg)',
'JPEG': 'Still Image JPEG DIB',
'JPG ': 'JPEG compressed',
'JPGL': 'Webcam JPEG Light',
'KMVC': 'Karl Morton\'s Video Codec',
'KPCD': 'Kodak Photo CD',
'L261': 'Lead Technologies H.261',
'L263': 'Lead Technologies H.263',
'LAGS': 'Lagarith LossLess',
'LBYR': 'Creative WebCam codec',
'LCMW': 'Lead Technologies Motion CMW Codec',
'LCW2': 'LEADTools MCMW 9Motion Wavelet)',
'LEAD': 'LEAD Video Codec',
'LGRY': 'Lead Technologies Grayscale Image',
'LJ2K': 'LEADTools JPEG2000',
'LJPG': 'LEAD MJPEG Codec',
'LMP2': 'LEADTools MPEG2',
'LOCO': 'LOCO Lossless Codec',
'LSCR': 'LEAD Screen Capture',
'LSVM': 'Vianet Lighting Strike Vmail (Streaming)',
'LZO1': 'LZO compressed (lossless codec)',
'M261': 'Microsoft H.261',
'M263': 'Microsoft H.263',
'M4CC': 'ESS MPEG4 Divio codec',
'M4S2': 'Microsoft MPEG-4 (M4S2)',
'MC12': 'ATI Motion Compensation Format (MC12)',
'MC24': 'MainConcept Motion JPEG Codec',
'MCAM': 'ATI Motion Compensation Format (MCAM)',
'MCZM': 'Theory MicroCosm Lossless 64bit RGB with Alpha-channel',
'MDVD': 'Alex MicroDVD Video (hacked MS MPEG-4)',
'MDVF': 'Pinnacle DV/DV50/DVHD100',
'MHFY': 'A.M.Paredes mhuffyYUV (LossLess)',
'MJ2C': 'Morgan Multimedia Motion JPEG2000',
'MJPA': 'Pinnacle ReelTime MJPG hardware codec',
'MJPB': 'Motion JPEG codec',
'MJPG': 'Motion JPEG DIB',
'MJPX': 'Pegasus PICVideo Motion JPEG',
'MMES': 'Matrox MPEG-2 I-frame',
'MNVD': 'MindBend MindVid LossLess',
'MP2A': 'MPEG-2 Audio',
'MP2T': 'MPEG-2 Transport Stream',
'MP2V': 'MPEG-2 Video',
'MP41': 'Microsoft MPEG-4 V1 (enhansed H263)',
'MP42': 'Microsoft MPEG-4 (low-motion)',
'MP43': 'Microsoft MPEG-4 (fast-motion)',
'MP4A': 'MPEG-4 Audio',
'MP4S': 'Microsoft MPEG-4 (MP4S)',
'MP4T': 'MPEG-4 Transport Stream',
'MP4V': 'Apple QuickTime MPEG-4 native',
'MPEG': 'MPEG-1',
'MPG1': 'FFmpeg-1',
'MPG2': 'FFmpeg-1',
'MPG3': 'Same as Low motion DivX MPEG-4',
'MPG4': 'Microsoft MPEG-4 Video High Speed Compressor',
'MPGI': 'Sigma Designs MPEG',
'MPNG': 'Motion PNG codec',
'MRCA': 'Martin Regen Codec',
'MRLE': 'Run Length Encoding',
'MSS1': 'Windows Screen Video',
'MSS2': 'Windows Media 9',
'MSUC': 'MSU LossLess',
'MSVC': 'Microsoft Video 1',
'MSZH': 'Lossless codec (ZIP compression)',
'MTGA': 'Motion TGA images (24, 32 bpp)',
'MTX1': 'Matrox MTX1',
'MTX2': 'Matrox MTX2',
'MTX3': 'Matrox MTX3',
'MTX4': 'Matrox MTX4',
'MTX5': 'Matrox MTX5',
'MTX6': 'Matrox MTX6',
'MTX7': 'Matrox MTX7',
'MTX8': 'Matrox MTX8',
'MTX9': 'Matrox MTX9',
'MV12': 'MV12',
'MVI1': 'Motion Pixels MVI',
'MVI2': 'Motion Pixels MVI',
'MWV1': 'Aware Motion Wavelets',
'MYUV': 'Media-100 844/X Uncompressed',
'NAVI': 'nAVI',
'NDIG': 'Ahead Nero Digital MPEG-4 Codec',
'NHVU': 'NVidia Texture Format (GEForce 3)',
'NO16': 'Theory None16 64bit uncompressed RAW',
'NT00': 'NewTek LigtWave HDTV YUV with Alpha-channel',
'NTN1': 'Nogatech Video Compression 1',
'NTN2': 'Nogatech Video Compression 2 (GrabBee hardware coder)',
'NUV1': 'NuppelVideo',
'NV12': '8-bit Y plane followed by an interleaved U/V plane with 2x2 subsampling',
'NV21': 'As NV12 with U and V reversed in the interleaved plane',
'NVDS': 'nVidia Texture Format',
'NVHS': 'NVidia Texture Format (GEForce 3)',
'NVS0': 'nVidia GeForce Texture',
'NVS1': 'nVidia GeForce Texture',
'NVS2': 'nVidia GeForce Texture',
'NVS3': 'nVidia GeForce Texture',
'NVS4': 'nVidia GeForce Texture',
'NVS5': 'nVidia GeForce Texture',
'NVT0': 'nVidia GeForce Texture',
'NVT1': 'nVidia GeForce Texture',
'NVT2': 'nVidia GeForce Texture',
'NVT3': 'nVidia GeForce Texture',
'NVT4': 'nVidia GeForce Texture',
'NVT5': 'nVidia GeForce Texture',
'PDVC': 'I-O Data Device Digital Video Capture DV codec',
'PGVV': 'Radius Video Vision',
'PHMO': 'IBM Photomotion',
'PIM1': 'Pegasus Imaging',
'PIM2': 'Pegasus Imaging',
'PIMJ': 'Pegasus Imaging Lossless JPEG',
'PIXL': 'MiroVideo XL (Motion JPEG)',
'PNG ': 'Apple PNG',
'PNG1': 'Corecodec.org CorePNG Codec',
'PVEZ': 'Horizons Technology PowerEZ',
'PVMM': 'PacketVideo Corporation MPEG-4',
'PVW2': 'Pegasus Imaging Wavelet Compression',
'PVWV': 'Pegasus Imaging Wavelet 2000',
'PXLT': 'Apple Pixlet (Wavelet)',
'Q1.0': 'Q-Team QPEG 1.0 (www.q-team.de)',
'Q1.1': 'Q-Team QPEG 1.1 (www.q-team.de)',
'QDGX': 'Apple QuickDraw GX',
'QPEG': 'Q-Team QPEG 1.0',
'QPEQ': 'Q-Team QPEG 1.1',
'R210': 'BlackMagic YUV (Quick Time)',
'R411': 'Radius DV NTSC YUV',
'R420': 'Radius DV PAL YUV',
'RAVI': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAV_': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAW ': 'Full Frames (Uncompressed)',
'RGB ': 'Full Frames (Uncompressed)',
'RGB(15)': 'Uncompressed RGB15 5:5:5',
'RGB(16)': 'Uncompressed RGB16 5:6:5',
'RGB(24)': 'Uncompressed RGB24 8:8:8',
'RGB1': 'Uncompressed RGB332 3:3:2',
'RGBA': 'Raw RGB with alpha',
'RGBO': 'Uncompressed RGB555 5:5:5',
'RGBP': 'Uncompressed RGB565 5:6:5',
'RGBQ': 'Uncompressed RGB555X 5:5:5 BE',
'RGBR': 'Uncompressed RGB565X 5:6:5 BE',
'RGBT': 'Computer Concepts 32-bit support',
'RL4 ': 'RLE 4bpp RGB',
'RL8 ': 'RLE 8bpp RGB',
'RLE ': 'Microsoft Run Length Encoder',
'RLE4': 'Run Length Encoded 4',
'RLE8': 'Run Length Encoded 8',
'RMP4': 'REALmagic MPEG-4 Video Codec',
'ROQV': 'Id RoQ File Video Decoder',
'RPZA': 'Apple Video 16 bit "road pizza"',
'RT21': 'Intel Real Time Video 2.1',
'RTV0': 'NewTek VideoToaster',
'RUD0': 'Rududu video codec',
'RV10': 'RealVideo codec',
'RV13': 'RealVideo codec',
'RV20': 'RealVideo G2',
'RV30': 'RealVideo 8',
'RV40': 'RealVideo 9',
'RVX ': 'Intel RDX (RVX )',
'S263': 'Sorenson Vision H.263',
'S422': 'Tekram VideoCap C210 YUV 4:2:2',
'SAMR': 'Adaptive Multi-Rate (AMR) audio codec',
'SAN3': 'MPEG-4 codec (direct copy of DivX 3.11a)',
'SDCC': 'Sun Communication Digital Camera Codec',
'SEDG': 'Samsung MPEG-4 codec',
'SFMC': 'CrystalNet Surface Fitting Method',
'SHR0': 'BitJazz SheerVideo',
'SHR1': 'BitJazz SheerVideo',
'SHR2': 'BitJazz SheerVideo',
'SHR3': 'BitJazz SheerVideo',
'SHR4': 'BitJazz SheerVideo',
'SHR5': 'BitJazz SheerVideo',
'SHR6': 'BitJazz SheerVideo',
'SHR7': 'BitJazz SheerVideo',
'SJPG': 'CUseeMe Networks Codec',
'SL25': 'SoftLab-NSK DVCPRO',
'SL50': 'SoftLab-NSK DVCPRO50',
'SLDV': 'SoftLab-NSK Forward DV Draw codec',
'SLIF': 'SoftLab-NSK MPEG2 I-frames',
'SLMJ': 'SoftLab-NSK Forward MJPEG',
'SMC ': 'Apple Graphics (SMC) codec (256 color)',
'SMSC': 'Radius SMSC',
'SMSD': 'Radius SMSD',
'SMSV': 'WorldConnect Wavelet Video',
'SNOW': 'SNOW codec',
'SP40': 'SunPlus YUV',
'SP44': 'SunPlus Aiptek MegaCam Codec',
'SP53': 'SunPlus Aiptek MegaCam Codec',
'SP54': 'SunPlus Aiptek MegaCam Codec',
'SP55': 'SunPlus Aiptek MegaCam Codec',
'SP56': 'SunPlus Aiptek MegaCam Codec',
'SP57': 'SunPlus Aiptek MegaCam Codec',
'SP58': 'SunPlus Aiptek MegaCam Codec',
'SPIG': 'Radius Spigot',
'SPLC': 'Splash Studios ACM Audio Codec',
'SPRK': 'Sorenson Spark',
'SQZ2': 'Microsoft VXTreme Video Codec V2',
'STVA': 'ST CMOS Imager Data (Bayer)',
'STVB': 'ST CMOS Imager Data (Nudged Bayer)',
'STVC': 'ST CMOS Imager Data (Bunched)',
'STVX': 'ST CMOS Imager Data (Extended CODEC Data Format)',
'STVY': 'ST CMOS Imager Data (Extended CODEC Data Format with Correction Data)',
'SV10': 'Sorenson Video R1',
'SVQ1': 'Sorenson Video R3',
'SVQ3': 'Sorenson Video 3 (Apple Quicktime 5)',
'SWC1': 'MainConcept Motion JPEG Codec',
'T420': 'Toshiba YUV 4:2:0',
'TGA ': 'Apple TGA (with Alpha-channel)',
'THEO': 'FFVFW Supported Codec',
'TIFF': 'Apple TIFF (with Alpha-channel)',
'TIM2': 'Pinnacle RAL DVI',
'TLMS': 'TeraLogic Motion Intraframe Codec (TLMS)',
'TLST': 'TeraLogic Motion Intraframe Codec (TLST)',
'TM20': 'Duck TrueMotion 2.0',
'TM2A': 'Duck TrueMotion Archiver 2.0',
'TM2X': 'Duck TrueMotion 2X',
'TMIC': 'TeraLogic Motion Intraframe Codec (TMIC)',
'TMOT': 'Horizons Technology TrueMotion S',
'TR20': 'Duck TrueMotion RealTime 2.0',
'TRLE': 'Akula Alpha Pro Custom AVI (LossLess)',
'TSCC': 'TechSmith Screen Capture Codec',
'TV10': 'Tecomac Low-Bit Rate Codec',
'TVJP': 'TrueVision Field Encoded Motion JPEG',
'TVMJ': 'Truevision TARGA MJPEG Hardware Codec',
'TY0N': 'Trident TY0N',
'TY2C': 'Trident TY2C',
'TY2N': 'Trident TY2N',
'U263': 'UB Video StreamForce H.263',
'U<Y ': 'Discreet UC YUV 4:2:2:4 10 bit',
'U<YA': 'Discreet UC YUV 4:2:2:4 10 bit (with Alpha-channel)',
'UCOD': 'eMajix.com ClearVideo',
'ULTI': 'IBM Ultimotion',
'UMP4': 'UB Video MPEG 4',
'UYNV': 'UYVY',
'UYVP': 'YCbCr 4:2:2',
'UYVU': 'SoftLab-NSK Forward YUV codec',
'UYVY': 'UYVY 4:2:2 byte ordering',
'V210': 'Optibase VideoPump 10-bit 4:2:2 Component YCbCr',
'V261': 'Lucent VX2000S',
'V422': '24 bit YUV 4:2:2 Format',
'V655': '16 bit YUV 4:2:2 Format',
'VBLE': 'MarcFD VBLE Lossless Codec',
'VCR1': 'ATI VCR 1.0',
'VCR2': 'ATI VCR 2.0',
'VCR3': 'ATI VCR 3.0',
'VCR4': 'ATI VCR 4.0',
'VCR5': 'ATI VCR 5.0',
'VCR6': 'ATI VCR 6.0',
'VCR7': 'ATI VCR 7.0',
'VCR8': 'ATI VCR 8.0',
'VCR9': 'ATI VCR 9.0',
'VDCT': 'Video Maker Pro DIB',
'VDOM': 'VDOnet VDOWave',
'VDOW': 'VDOnet VDOLive (H.263)',
'VDST': 'VirtualDub remote frameclient ICM driver',
'VDTZ': 'Darim Vison VideoTizer YUV',
'VGPX': 'VGPixel Codec',
'VIDM': 'DivX 5.0 Pro Supported Codec',
'VIDS': 'YUV 4:2:2 CCIR 601 for V422',
'VIFP': 'VIFP',
'VIV1': 'Vivo H.263',
'VIV2': 'Vivo H.263',
'VIVO': 'Vivo H.263 v2.00',
'VIXL': 'Miro Video XL',
'VLV1': 'Videologic VLCAP.DRV',
'VP30': 'On2 VP3.0',
'VP31': 'On2 VP3.1',
'VP40': 'On2 TrueCast VP4',
'VP50': 'On2 TrueCast VP5',
'VP60': 'On2 TrueCast VP6',
'VP61': 'On2 TrueCast VP6.1',
'VP62': 'On2 TrueCast VP6.2',
'VP70': 'On2 TrueMotion VP7',
'VQC1': 'Vector-quantised codec 1',
'VQC2': 'Vector-quantised codec 2',
'VR21': 'BlackMagic YUV (Quick Time)',
'VSSH': 'Vanguard VSS H.264',
'VSSV': 'Vanguard Software Solutions Video Codec',
'VSSW': 'Vanguard VSS H.264',
'VTLP': 'Alaris VideoGramPixel Codec',
'VX1K': 'VX1000S Video Codec',
'VX2K': 'VX2000S Video Codec',
'VXSP': 'VX1000SP Video Codec',
'VYU9': 'ATI Technologies YUV',
'VYUY': 'ATI Packed YUV Data',
'WBVC': 'Winbond W9960',
'WHAM': 'Microsoft Video 1 (WHAM)',
'WINX': 'Winnov Software Compression',
'WJPG': 'AverMedia Winbond JPEG',
'WMV1': 'Windows Media Video V7',
'WMV2': 'Windows Media Video V8',
'WMV3': 'Windows Media Video V9',
'WMVA': 'WMVA codec',
'WMVP': 'Windows Media Video V9',
'WNIX': 'WniWni Codec',
'WNV1': 'Winnov Hardware Compression',
'WNVA': 'Winnov hw compress',
'WRLE': 'Apple QuickTime BMP Codec',
'WRPR': 'VideoTools VideoServer Client Codec',
'WV1F': 'WV1F codec',
'WVLT': 'IllusionHope Wavelet 9/7',
'WVP2': 'WVP2 codec',
'X263': 'Xirlink H.263',
'X264': 'XiWave GNU GPL x264 MPEG-4 Codec',
'XLV0': 'NetXL Video Decoder',
'XMPG': 'Xing MPEG (I-Frame only)',
'XVID': 'XviD MPEG-4',
'XVIX': 'Based on XviD MPEG-4 codec',
'XWV0': 'XiWave Video Codec',
'XWV1': 'XiWave Video Codec',
'XWV2': 'XiWave Video Codec',
'XWV3': 'XiWave Video Codec (Xi-3 Video)',
'XWV4': 'XiWave Video Codec',
'XWV5': 'XiWave Video Codec',
'XWV6': 'XiWave Video Codec',
'XWV7': 'XiWave Video Codec',
'XWV8': 'XiWave Video Codec',
'XWV9': 'XiWave Video Codec',
'XXAN': 'XXAN',
'XYZP': 'Extended PAL format XYZ palette',
'Y211': 'YUV 2:1:1 Packed',
'Y216': 'Pinnacle TARGA CineWave YUV (Quick Time)',
'Y411': 'YUV 4:1:1 Packed',
'Y41B': 'YUV 4:1:1 Planar',
'Y41P': 'PC1 4:1:1',
'Y41T': 'PC1 4:1:1 with transparency',
'Y422': 'Y422',
'Y42B': 'YUV 4:2:2 Planar',
'Y42T': 'PCI 4:2:2 with transparency',
'Y444': 'IYU2',
'Y8 ': 'Grayscale video',
'Y800': 'Simple grayscale video',
'YC12': 'Intel YUV12 Codec',
'YMPG': 'YMPEG Alpha',
'YU12': 'ATI YV12 4:2:0 Planar',
'YU92': 'Intel - YUV',
'YUNV': 'YUNV',
'YUV2': 'Apple Component Video (YUV 4:2:2)',
'YUV8': 'Winnov Caviar YUV8',
'YUV9': 'Intel YUV9',
'YUVP': 'YCbCr 4:2:2',
'YUY2': 'Uncompressed YUV 4:2:2',
'YUYV': 'Canopus YUV',
'YV12': 'YVU12 Planar',
'YV16': 'Elecard YUV 4:2:2 Planar',
'YV92': 'Intel Smart Video Recorder YVU9',
'YVU9': 'Intel YVU9 Planar',
'YVYU': 'YVYU 4:2:2 byte ordering',
'ZLIB': 'ZLIB',
'ZPEG': 'Metheus Video Zipper',
'ZYGO': 'ZyGo Video Codec'
}
# Make lookups foolproof: register an upper-cased alias for every code, and
# for codes padded with trailing spaces (e.g. 'AVR ') also a stripped alias.
# Iterate over a snapshot so FOURCC is never mutated while being read
# (required on Python 3, harmless on Python 2).
for code, value in list(FOURCC.items()):
    if code.upper() not in FOURCC:
        FOURCC[code.upper()] = value
    if code.endswith(' '):
        FOURCC[code.strip().upper()] = value
| gpl-3.0 |
morph027/ansible-modules-extras | network/asa/asa_config.py | 22 | 11915 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: asa_config
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Manage Cisco ASA configuration sections
description:
- Cisco ASA configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ASA configuration sections in
a deterministic way.
extends_documentation_fragment: asa
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
        line is not correct.
required: false
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When the argument is set to
I(merge), the configuration changes are merged with the current
device running configuration. When the argument is set to I(check)
the configuration updates are determined but not actually configured
on the remote device.
required: false
default: merge
choices: ['merge', 'check']
commit:
description:
- This argument specifies the update method to use when applying the
configuration changes to the remote node. If the value is set to
I(merge) the configuration updates are merged with the running-
config. If the value is set to I(check), no changes are made to
the remote host.
required: false
default: merge
choices: ['merge', 'check']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
required: false
default: null
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
required: false
default: no
choices: ['yes', 'no']
passwords:
description:
- This argument specifies to include passwords in the config
when retrieving the running-config from the remote device. This
includes passwords related to VPN endpoints. This argument is
mutually exclusive with I(defaults).
required: false
default: no
choices: ['yes', 'no']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
transport: cli
- asa_config:
lines:
- network-object host 10.80.30.18
- network-object host 10.80.30.19
- network-object host 10.80.30.20
parents: ['object-group network OG-MONITORED-SERVERS']
provider: "{{ cli }}"
- asa_config:
host: "{{ inventory_hostname }}"
lines:
- message-length maximum client auto
- message-length maximum 512
match: line
parents: ['policy-map type inspect dns PM-DNS', 'parameters']
authorize: yes
auth_pass: cisco
username: admin
password: cisco
context: ansible
- asa_config:
lines:
- ikev1 pre-shared-key MyS3cretVPNK3y
parents: tunnel-group 1.1.1.1 ipsec-attributes
passwords: yes
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
responses:
description: The set of responses from issuing the commands on the device
returned: when not check_mode
type: list
sample: ['...', '...']
"""
import re
import ansible.module_utils.asa
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
def get_config(module):
    """Return the device configuration as a NetworkConfig object.

    Prefers the user-supplied ``config`` parameter; otherwise retrieves
    the running-config from the device, optionally requesting the
    ``defaults`` or ``passwords`` variant (the two options are mutually
    exclusive, enforced in main()).
    """
    contents = module.params['config']
    if not contents:
        # Decide which flavour of the running-config to request.
        include = None
        if module.params['defaults']:
            include = 'defaults'
        elif module.params['passwords']:
            include = 'passwords'
        contents = module.config.get_config(include=include)
    return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
    """Build the candidate configuration from ``src`` or ``lines``.

    ``src`` (a file/template path) wins over ``lines``; with ``lines``,
    the optional ``parents`` list scopes the commands to a config section.
    """
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        candidate.add(module.params['lines'],
                      parents=module.params['parents'] or list())
    return candidate
def run(module, result):
    """Diff the candidate config against the device and apply the changes.

    Populates ``result['updates']`` with the commands that would be (or
    were) pushed and sets ``result['changed']`` accordingly.  Honors
    check mode: commands are computed but never sent to the device.
    """
    params = module.params
    candidate = get_candidate(module)
    if params['match'] == 'none':
        # No comparison requested: push the candidate verbatim.
        configobjs = candidate.items
    else:
        current = get_config(module)
        configobjs = candidate.difference(current,
                                          path=params['parents'],
                                          match=params['match'],
                                          replace=params['replace'])
    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')
        if params['lines']:
            # 'before' commands are prepended, 'after' appended, without
            # affecting how the candidate was matched above.
            if params['before']:
                commands[:0] = params['before']
            if params['after']:
                commands.extend(params['after'])
        result['updates'] = commands
        # Send the configuration commands to the device and merge them
        # with the current running config (skipped in check mode, but the
        # queryset is still reported as changed).
        if not module.check_mode:
            module.config.load_config(commands)
        result['changed'] = True
    if params['save']:
        if not module.check_mode:
            module.config.save_config()
        result['changed'] = True
def main():
    """Module entry point: build the argument spec, execute, report results."""
    spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        config=dict(),
        defaults=dict(type='bool', default=False),
        passwords=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )
    module = NetworkModule(
        argument_spec=spec,
        connect_on_load=False,
        mutually_exclusive=[('lines', 'src'), ('defaults', 'passwords')],
        # strict/exact matching and block replace only make sense with
        # explicit lines.
        required_if=[('match', 'strict', ['lines']),
                     ('match', 'exact', ['lines']),
                     ('replace', 'block', ['lines'])],
        supports_check_mode=True)
    result = dict(changed=False)
    if module.params['backup']:
        # Snapshot the current config so the action plugin can write the
        # backup file on the control host.
        result['__backup__'] = module.config.get_config()
    try:
        run(module, result)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc), **exc.kwargs)
    module.exit_json(**result)
# Run the module only when executed directly (e.g. by the Ansible runner),
# not when imported.
if __name__ == '__main__':
    main()
| gpl-3.0 |
paukenba/youtube-dl | youtube_dl/extractor/newstube.py | 113 | 3537 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class NewstubeIE(InfoExtractor):
    """Extractor for videos hosted on newstube.ru (RTMP streams)."""
    _VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
    _TEST = {
        'url': 'http://www.newstube.ru/media/telekanal-cnn-peremestil-gorod-slavyansk-v-krym',
        'info_dict': {
            'id': '728e0ef2-e187-4012-bac0-5a081fdcb1f6',
            'ext': 'flv',
            'title': 'Телеканал CNN переместил город Славянск в Крым',
            'description': 'md5:419a8c9f03442bc0b0a794d689360335',
            'duration': 31.05,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        page = self._download_webpage(url, video_id, 'Downloading page')

        # The page-level id is a slug; the real identifier is the GUID
        # embedded in the flash player URL.
        video_guid = self._html_search_regex(
            r'<meta property="og:video:url" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
            page, 'video GUID')

        player = self._download_xml(
            'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
            video_guid, 'Downloading player XML')

        def ns(s):
            # Qualify every path component of an XPath with the player's
            # XML namespace.
            return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}

        error_message = player.find(ns('./ErrorMessage'))
        if error_message is not None:
            raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)

        session_id = player.find(ns('./SessionId')).text
        media_info = player.find(ns('./Medias/MediaInfo'))

        formats = []
        for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
            media_location = stream_info.find(ns('./MediaLocation'))
            if media_location is None:
                # Stream entry without an RTMP endpoint; nothing to download.
                continue
            server = media_location.find(ns('./Server')).text
            app = media_location.find(ns('./App')).text
            formats.append({
                'url': 'rtmp://%s/%s' % (server, app),
                'app': app,
                'play_path': '01/%s' % video_guid.upper(),
                'rtmp_conn': [
                    'S:%s' % session_id,
                    'S:%s' % stream_info.find(ns('./Id')).text,
                    'S:n2',
                ],
                'page_url': url,
                'ext': 'flv',
                'format_id': stream_info.find(ns('./QualityId')).text,
                'format_note': stream_info.find(ns('./Name')).text,
                'width': int(stream_info.find(ns('./Width')).text),
                'height': int(stream_info.find(ns('./Height')).text),
            })
        self._sort_formats(formats)

        return {
            'id': video_guid,
            'title': media_info.find(ns('./Name')).text,
            'description': self._og_search_description(page),
            'thumbnail': media_info.find(ns('./KeyFrame')).text,
            # Duration is reported in milliseconds.
            'duration': int(media_info.find(ns('./Duration')).text) / 1000.0,
            'formats': formats,
        }
| unlicense |
orekyuu/intellij-community | python/lib/Lib/site-packages/django/db/models/query.py | 71 | 53653 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
from itertools import izip
from django.db import connections, router, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import signals, sql
from django.utils.copycompat import deepcopy
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.
    """
    def __init__(self, model=None, query=None, using=None):
        self.model = model
        # EmptyQuerySet instantiates QuerySet with model as None
        self._db = using
        self.query = query or sql.Query(self.model)
        self._result_cache = None    # rows fetched so far (lazily populated)
        self._iter = None            # in-progress iterator over DB results
        self._sticky_filter = False  # see _next_is_sticky()
        self._for_write = False      # route to the "write" database when True
    ########################
    # PYTHON MAGIC METHODS #
    ########################
    def __deepcopy__(self, memo):
        """
        Deep copy of a QuerySet doesn't populate the cache
        """
        obj = self.__class__()
        for k,v in self.__dict__.items():
            if k in ('_iter','_result_cache'):
                # Never copy in-flight iterators or cached results.
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = deepcopy(v, memo)
        return obj
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        len(self)
        obj_dict = self.__dict__.copy()
        obj_dict['_iter'] = None
        return obj_dict
    def __repr__(self):
        # Evaluate at most REPR_OUTPUT_SIZE + 1 rows so a huge queryset
        # can be repr()'d safely; the extra row triggers the truncation mark.
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)
    def __len__(self):
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
        if self._result_cache is None:
            if self._iter:
                self._result_cache = list(self._iter)
            else:
                self._result_cache = list(self.iterator())
        elif self._iter:
            # Partially evaluated: drain the remaining rows into the cache.
            self._result_cache.extend(list(self._iter))
        return len(self._result_cache)
    def __iter__(self):
        if self._result_cache is None:
            self._iter = self.iterator()
            self._result_cache = []
        if self._iter:
            # Partially evaluated: yield cached rows first, fetching more
            # from the database as needed.
            return self._result_iter()
        # Python's list iterator is better than our version when we're just
        # iterating over the cache.
        return iter(self._result_cache)
    def _result_iter(self):
        # Walk the result cache, topping it up from the database iterator
        # whenever the consumer outruns what has been fetched so far.
        pos = 0
        while 1:
            upper = len(self._result_cache)
            while pos < upper:
                yield self._result_cache[pos]
                pos = pos + 1
            if not self._iter:
                raise StopIteration
            if len(self._result_cache) <= pos:
                self._fill_cache()
    def __nonzero__(self):
        # Truth testing: fetch at most one row instead of evaluating the
        # whole queryset (Python 2 equivalent of __bool__).
        if self._result_cache is not None:
            return bool(self._result_cache)
        try:
            iter(self).next()
        except StopIteration:
            return False
        return True
    def __contains__(self, val):
        # The 'in' operator works without this method, due to __iter__. This
        # implementation exists only to shortcut the creation of Model
        # instances, by bailing out early if we find a matching element.
        pos = 0
        if self._result_cache is not None:
            if val in self._result_cache:
                return True
            elif self._iter is None:
                # iterator is exhausted, so we have our answer
                return False
            # remember not to check these again:
            pos = len(self._result_cache)
        else:
            # We need to start filling the result cache out. The following
            # ensures that self._iter is not None and self._result_cache is not
            # None
            it = iter(self)
        # Carry on, one result at a time.
        while True:
            if len(self._result_cache) <= pos:
                self._fill_cache(num=1)
            if self._iter is None:
                # we ran out of items
                return False
            if self._result_cache[pos] == val:
                return True
            pos += 1
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.

        Indexing an unevaluated queryset translates into SQL LIMIT/OFFSET;
        negative indices are rejected because they have no SQL equivalent.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
                "Negative indexing is not supported."
        if self._result_cache is not None:
            if self._iter is not None:
                # The result cache has only been partially populated, so we may
                # need to fill it out a bit more.
                if isinstance(k, slice):
                    if k.stop is not None:
                        # Some people insist on passing in strings here.
                        bound = int(k.stop)
                    else:
                        bound = None
                else:
                    bound = k + 1
                if len(self._result_cache) < bound:
                    self._fill_cache(bound - len(self._result_cache))
            return self._result_cache[k]
        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # A stepped slice cannot be expressed in SQL: evaluate the
            # limited queryset and apply the step in Python.
            return k.step and list(qs)[::k.step] or qs
        try:
            qs = self._clone()
            qs.query.set_limits(k, k + 1)
            return list(qs)[0]
        except self.model.DoesNotExist, e:
            raise IndexError(e.args)
    def __and__(self, other):
        # Intersection: combine the two queries with SQL AND.
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            # Anything ANDed with an empty queryset is empty.
            return other._clone()
        combined = self._clone()
        combined.query.combine(other.query, sql.AND)
        return combined
    def __or__(self, other):
        # Union: combine the two queries with SQL OR.
        self._merge_sanity_check(other)
        combined = self._clone()
        if isinstance(other, EmptyQuerySet):
            # X OR empty is just X.
            return combined
        combined.query.combine(other.query, sql.OR)
        return combined
    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.

        Each row is turned into a model instance (or a deferred-class
        instance when only()/defer() restricted the loaded fields), with
        extra() selections and aggregate annotations attached as attributes.
        """
        # select_related may be True (follow everything) or a dict of the
        # explicitly requested relations.
        fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth
        extra_select = self.query.extra_select.keys()
        aggregate_select = self.query.aggregate_select.keys()
        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.fields
            pk_idx = self.model._meta.pk_index()
        # Row layout: [extra columns][model fields][aggregates].
        index_start = len(extra_select)
        aggregate_start = index_start + len(self.model._meta.fields)
        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                if field == self.model._meta.pk:
                    # Record the index of the primary key when it is found
                    pk_idx = len(load_fields)
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)
        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialise
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)
        # Cache db and model outside the loop
        db = self.db
        model = self.model
        compiler = self.query.get_compiler(using=db)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(model, row,
                            index_start, using=db, max_depth=max_depth,
                            requested=requested, offset=len(aggregate_select),
                            only_load=only_load)
            else:
                if skip:
                    row_data = row[index_start:aggregate_start]
                    pk_val = row_data[pk_idx]
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    # Omit aggregates in object creation.
                    obj = model(*row[index_start:aggregate_start])
                # Store the source database of the object
                obj._state.db = db
                # This object came from the database; it's not being added.
                obj._state.adding = False
            if extra_select:
                for i, k in enumerate(extra_select):
                    setattr(obj, k, row[i])
            # Add the aggregates to the model
            if aggregate_select:
                for i, aggregate in enumerate(aggregate_select):
                    setattr(obj, aggregate, row[i+aggregate_start])
            yield obj
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset
        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        for arg in args:
            kwargs[arg.default_alias] = arg
        # Work on a clone so the aggregation does not mutate this queryset.
        query = self.query.clone()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=True)
        return query.get_aggregation(using=self.db)
    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.
        If the QuerySet is already fully cached this simply returns the length
        of the cached results set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None and not self._iter:
            return len(self._result_cache)
        return self.query.get_count(using=self.db)
    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.

        Raises the model's DoesNotExist for zero matches and
        MultipleObjectsReturned for more than one.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            # Ordering is irrelevant for a single-object lookup; clearing it
            # avoids unnecessary joins (only possible when not sliced).
            clone = clone.order_by()
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist("%s matching query does not exist."
                    % self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
                % (self.model._meta.object_name, num, kwargs))
    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        # Creation must go to the write database.
        self._for_write = True
        obj.save(force_insert=True, using=self.db)
        return obj
    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert kwargs, \
                'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            self._for_write = True
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            try:
                # Strip lookup separators ('__') from kwargs: only plain
                # field names can be used to construct the new instance.
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                # Savepoint lets us recover if a concurrent insert wins the
                # race and our INSERT violates a uniqueness constraint.
                sid = transaction.savepoint(using=self.db)
                obj.save(force_insert=True, using=self.db)
                transaction.savepoint_commit(sid, using=self.db)
                return obj, True
            except IntegrityError, e:
                transaction.savepoint_rollback(sid, using=self.db)
                try:
                    # Another process may have created the row meanwhile.
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    raise e
    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or optional given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken."
        obj = self._clone()
        # LIMIT 1, ordered descending on the chosen field.
        obj.query.set_limits(high=1)
        obj.query.add_ordering('-%s' % latest_by)
        return obj.get()
    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with in_bulk"
        assert isinstance(id_list, (tuple, list, set, frozenset)), \
                "in_bulk() must be provided with a list of IDs."
        if not id_list:
            return {}
        qs = self._clone()
        qs.query.add_filter(('pk__in', id_list))
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with delete."
        del_query = self._clone()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_related = False
        del_query.query.clear_ordering()
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    # Flag used by template engines etc. to refuse to call this method.
    delete.alters_data = True
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.

        Returns the number of rows matched (backend-dependent semantics).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # If no transaction is being managed, open one so the UPDATE commits
        # as a unit; remember whether we forced it so we can clean up below.
        if not transaction.is_managed(using=self.db):
            transaction.enter_transaction_management(using=self.db)
            forced_managed = True
        else:
            forced_managed = False
        try:
            rows = query.get_compiler(self.db).execute_sql(None)
            if forced_managed:
                transaction.commit(using=self.db)
            else:
                transaction.commit_unless_managed(using=self.db)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.db)
        # Cached results are stale after an UPDATE.
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(None)
    _update.alters_data = True
    def exists(self):
        # True if the queryset matches at least one row. Uses the result
        # cache when available; otherwise issues a cheap existence query.
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)
    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################
    def values(self, *fields):
        # Each result is a dict of field name -> value instead of a model
        # instance.
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
    def values_list(self, *fields, **kwargs):
        # Each result is a tuple of values (or a bare value when flat=True
        # and a single field was requested).
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (kwargs.keys(),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order)
    def none(self):
        """
        Returns an empty QuerySet.
        """
        return self._clone(klass=EmptyQuerySet)
    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################
    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()
    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)
    def _filter_or_exclude(self, negate, *args, **kwargs):
        # Shared implementation of filter() and exclude(); negate selects
        # which of the two behaviours is applied.
        if args or kwargs:
            assert self.query.can_filter(), \
                    "Cannot filter a query once a slice has been taken."
        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone
    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.
        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.
        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)
    def select_related(self, *fields, **kwargs):
        """
        Returns a new QuerySet instance that will select related objects.
        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (kwargs.keys(),))
        obj = self._clone()
        if fields:
            # Explicit field list and a depth limit are mutually exclusive.
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        if depth:
            obj.query.max_depth = depth
        return obj
    def dup_select_related(self, other):
        """
        Copies the related selection status from the QuerySet 'other' to the
        current QuerySet.
        """
        self.query.select_related = other.query.select_related
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            # Positional aggregates are keyed by their default alias; an
            # explicit kwarg with the same name would be silently clobbered,
            # so reject the collision up front.
            if arg.default_alias in kwargs:
                raise ValueError("The %s named annotation conflicts with the "
                                 "default name for another annotation."
                                 % arg.default_alias)
            kwargs[arg.default_alias] = arg
        names = set(self.model._meta.get_all_field_names())
        for aggregate in kwargs:
            if aggregate in names:
                raise ValueError("The %s annotation conflicts with a field on "
                    "the model." % aggregate)
        obj = self._clone()
        obj._setup_aggregate_query(kwargs.keys())
        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=False)
        return obj
    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
                "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        # Replace (not extend) any existing ordering.
        obj.query.clear_ordering()
        obj.query.add_ordering(*field_names)
        return obj
    def distinct(self, true_or_false=True):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        obj = self._clone()
        obj.query.distinct = true_or_false
        return obj
    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone
    def defer(self, *fields):
        """
        Defers the loading of data for certain fields until they are accessed.
        The set of fields to defer is added to any existing set of deferred
        fields. The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed (None acts as a
        reset option).
        """
        clone = self._clone()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone
    def only(self, *fields):
        """
        Essentially, the opposite of defer. Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        clone = self._clone()
        clone.query.add_immediate_loading(fields)
        return clone
    def using(self, alias):
        """
        Selects which database this QuerySet should execute its query against.
        """
        clone = self._clone()
        clone._db = alias
        return clone
    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################
    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self.query.extra_order_by or self.query.order_by:
            return True
        elif self.query.default_ordering and self.query.model._meta.ordering:
            return True
        else:
            return False
    # Exposed as a read-only attribute (old-style property definition).
    ordered = property(ordered)
    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        if self._for_write:
            return self._db or router.db_for_write(self.model)
        return self._db or router.db_for_read(self.model)
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db)
c._for_write = self._for_write
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(self._iter.next())
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
    """
    Checks that we are merging two comparable QuerySet classes. By default
    this does nothing, but see the ValuesQuerySet for an example of where
    it's useful.
    """
    # Intentionally a no-op hook; subclasses override to veto the merge.
    pass
def _setup_aggregate_query(self, aggregates):
    """
    Prepare the query for computing a result that contains aggregate annotations.
    """
    opts = self.model._meta
    if self.query.group_by is None:
        # No explicit grouping yet: group by all of the model's fields so
        # the aggregates are computed per object.
        field_names = [f.attname for f in opts.fields]
        self.query.add_fields(field_names, False)
        self.query.set_group_by()
def _prepare(self):
    # Hook used when this queryset is embedded as a subquery value;
    # subclasses may validate/transform themselves here (see
    # ValuesQuerySet._prepare for an example).
    return self
def _as_sql(self, connection):
    """
    Returns the internal query's SQL and parameters (as a tuple).
    """
    # A subquery only ever needs the primary key column.
    obj = self.values("pk")
    if obj._db is None or connection == connections[obj._db]:
        return obj.query.get_compiler(connection=connection).as_nested_sql()
    # Cross-database subqueries cannot be expressed in a single statement.
    raise ValueError("Can't do subqueries with queries on different DBs.")

# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
class ValuesQuerySet(QuerySet):
    """
    A QuerySet that yields dictionaries (column name -> value) instead of
    model instances.
    """
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False

        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.

    def iterator(self):
        # Purge any extra columns that haven't been explicitly asked for
        extra_names = self.query.extra_select.keys()
        field_names = self.field_names
        aggregate_names = self.query.aggregate_select.keys()

        # NOTE(review): this relies on dict.keys() returning lists
        # (Python 2).  Column order in each result row is: extras first,
        # then fields, then aggregates.
        names = extra_names + field_names + aggregate_names

        for row in self.query.get_compiler(self.db).results_iter():
            yield dict(zip(names, row))

    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()

        if self._fields:
            self.extra_names = []
            self.aggregate_names = []
            if not self.query.extra and not self.query.aggregates:
                # Short cut - if there are no extra or aggregates, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # we inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # had selected previously.
                    if f in self.query.extra:
                        self.extra_names.append(f)
                    elif f in self.query.aggregate_select:
                        self.aggregate_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.fields]
            self.aggregate_names = None

        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, True)
        if self.aggregate_names is not None:
            self.query.set_aggregate_mask(self.aggregate_names)

    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.aggregate_names = self.aggregate_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _merge_sanity_check(self, other):
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        # Combining two values querysets only makes sense when both sides
        # select the same columns.
        if (set(self.extra_names) != set(other.extra_names) or
                set(self.field_names) != set(other.field_names) or
                self.aggregate_names != other.aggregate_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                            % self.__class__.__name__)

    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate annotations.
        """
        self.query.set_group_by()

        if self.aggregate_names is not None:
            self.aggregate_names.extend(aggregates)
            self.query.set_aggregate_mask(self.aggregate_names)

        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)

    def _as_sql(self, connection):
        """
        For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
        only be used as nested queries if they're already set up to select only
        a single field (in which case, that is the field column that is
        returned). This differs from QuerySet.as_sql(), where the column to
        select is set up by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                            % self.__class__.__name__)

        obj = self._clone()
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    def _prepare(self):
        """
        Validates that we aren't trying to do a query like
        value__in=qs.values('value1', 'value2'), which isn't valid.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                            % self.__class__.__name__)
        return self
class ValuesListQuerySet(ValuesQuerySet):
    """
    A ValuesQuerySet that yields tuples -- or bare values when flat=True
    with a single selected field -- instead of dictionaries.
    """
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            # flat=True with a single field: yield bare values.
            for row in self.query.get_compiler(self.db).results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            # Plain fields only: row order already matches self._fields.
            for row in self.query.get_compiler(self.db).results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = self.query.extra_select.keys()
            field_names = self.field_names
            aggregate_names = self.query.aggregate_select.keys()

            # NOTE(review): relies on dict.keys() and filter() returning
            # lists (Python 2).
            names = extra_names + field_names + aggregate_names

            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names)
            else:
                fields = names

            for row in self.query.get_compiler(self.db).results_iter():
                data = dict(zip(names, row))
                yield tuple([data[f] for f in fields])

    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        if not hasattr(clone, "flat"):
            # Only assign flat if the clone didn't already get it from kwargs
            clone.flat = self.flat
        return clone
class DateQuerySet(QuerySet):
    """
    A QuerySet that yields one datetime object per distinct date of the
    requested kind for a date field (configured via the _field_name,
    _kind and _order attributes injected at creation time).
    """
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()

    def _setup_query(self):
        """
        Sets up any special features of the query attribute.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        self.query.add_date_select(self._field_name, self._kind, self._order)

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        # Bug fix: _order must survive cloning as well -- _setup_query()
        # reads it, so a clone that re-ran setup would otherwise fail with
        # AttributeError.  (Every DateQuerySet gets _order injected via
        # kwargs when first created, so it is always present on `self`.)
        c._order = self._order
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
class EmptyQuerySet(QuerySet):
    """
    A queryset that is always empty: no SQL is ever issued, and every
    refinement method simply returns the (still empty) queryset.
    """
    def __init__(self, model=None, query=None, using=None):
        super(EmptyQuerySet, self).__init__(model, query, using)
        # A pre-filled (empty) result cache guarantees no query execution.
        self._result_cache = []

    def __and__(self, other):
        return self._clone()

    def __or__(self, other):
        return other._clone()

    def count(self):
        return 0

    def delete(self):
        pass

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
        # Clones must stay empty too.
        c._result_cache = []
        return c

    def iterator(self):
        # This slightly odd construction is because we need an empty generator
        # (it raises StopIteration immediately).
        yield iter([]).next()

    def all(self):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def filter(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def exclude(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def complex_filter(self, filter_obj):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def select_related(self, *fields, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def annotate(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def order_by(self, *field_names):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def distinct(self, true_or_false=True):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Always returns EmptyQuerySet.
        """
        # Kept for parity with QuerySet.extra(): modifying a sliced
        # queryset is an error even when it is empty.
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
        return self

    def reverse(self):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def defer(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def only(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def update(self, **kwargs):
        """
        Don't update anything.
        """
        return 0

    # EmptyQuerySet is always an empty result in where-clauses (and similar
    # situations).
    value_annotation = False
def get_cached_row(klass, row, index_start, using, max_depth=0, cur_depth=0,
                   requested=None, offset=0, only_load=None, local_only=False):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    This method may be called recursively to populate deep select_related()
    clauses.

    Arguments:
     * klass - the class to retrieve (and instantiate)
     * row - the row of data returned by the database cursor
     * index_start - the index of the row at which data for this
       object is known to start
     * using - the database alias on which the query is being executed.
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * offset - the number of additional fields that are known to
       exist in `row` for `klass`. This usually means the number of
       annotated results on `klass`.
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * local_only - Only populate local fields. This is used when building
       following reverse select-related relations

    Returns a (instance-or-None, next-column-index) tuple.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None

    restricted = requested is not None
    if only_load:
        load_fields = only_load.get(klass)
        # When we create the object, we will also be populating
        # all the parent classes, so traverse the parent classes looking
        # for fields that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_fields_with_model():
            if field.name not in load_fields:
                skip.add(field.name)
            elif local_only and model is not None:
                continue
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        elif skip:
            # Some fields deferred: build a deferred subclass and
            # instantiate it by keyword.
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)
    else:
        # Load all fields on klass
        if local_only:
            field_names = [f.attname for f in klass._meta.local_fields]
        else:
            field_names = [f.attname for f in klass._meta.fields]
        field_count = len(field_names)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        else:
            obj = klass(**dict(zip(field_names, fields)))

    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using
        obj._state.adding = False

    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(f.rel.to, row, index_end, using,
                                    max_depth, cur_depth+1, next, only_load=only_load)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)

    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    if restricted:
        related_fields = [
            (o.field, o.model)
            for o in klass._meta.get_all_related_objects()
            if o.field.unique
        ]
        for f, model in related_fields:
            if not select_related_descend(f, restricted, requested, reverse=True):
                continue
            next = requested[f.related_query_name()]
            # Recursively retrieve the data for the related object
            cached_row = get_cached_row(model, row, index_end, using,
                                        max_depth, cur_depth+1, next, only_load=only_load, local_only=True)
            # If the recursive descent found an object, populate the
            # descriptor caches relevant to the object
            if cached_row:
                rel_obj, index_end = cached_row
                if obj is not None:
                    # If the field is unique, populate the
                    # reverse descriptor cache
                    setattr(obj, f.related.get_cache_name(), rel_obj)
                if rel_obj is not None:
                    # If the related object exists, populate
                    # the descriptor cache.
                    setattr(rel_obj, f.get_cache_name(), obj)
                    # Now populate all the non-local field values
                    # on the related object
                    for rel_field, rel_model in rel_obj._meta.get_fields_with_model():
                        if rel_model is not None:
                            setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
                            # populate the field cache for any related object
                            # that has already been retrieved
                            if rel_field.rel:
                                try:
                                    cached_obj = getattr(obj, rel_field.get_cache_name())
                                    setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                                except AttributeError:
                                    # Related object hasn't been cached yet
                                    pass
    return obj, index_end
class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        self.translations = translations or {}

    def __iter__(self):
        # Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all model's fields are present
        # in the query.
        model_init_field_names = {}
        # A list of tuples of (column name, column position). Used for
        # annotation fields.
        annotation_fields = []

        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        need_resolv_columns = hasattr(compiler, 'resolve_columns')

        query = iter(self.query)

        # Find out which columns are model's fields, and which ones should be
        # annotated to the model.
        for pos, column in enumerate(self.columns):
            if column in self.model_fields:
                model_init_field_names[self.model_fields[column].attname] = pos
            else:
                annotation_fields.append((column, pos))

        # Find out which model's fields are not present in the query.
        skip = set()
        for field in self.model._meta.fields:
            if field.attname not in model_init_field_names:
                skip.add(field.attname)
        if skip:
            # Missing fields are modelled as deferred attributes; the pk,
            # however, is mandatory.
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery('Raw query must include the primary key')
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model
            # All model's fields are present in the query. So, it is possible
            # to use *args based model instantation. For each field of the model,
            # record the query column position matching that field.
            model_init_field_pos = []
            for field in self.model._meta.fields:
                model_init_field_pos.append(model_init_field_names[field.attname])
        if need_resolv_columns:
            fields = [self.model_fields.get(c, None) for c in self.columns]
        # Begin looping through the query values.
        for values in query:
            if need_resolv_columns:
                values = compiler.resolve_columns(values, fields)
            # Associate fields to values
            if skip:
                # kwargs-based instantiation (some fields deferred).
                # (Python 2: dict.iteritems().)
                model_init_kwargs = {}
                for attname, pos in model_init_field_names.iteritems():
                    model_init_kwargs[attname] = values[pos]
                instance = model_cls(**model_init_kwargs)
            else:
                model_init_args = [values[pos] for pos in model_init_field_pos]
                instance = model_cls(*model_init_args)
            if annotation_fields:
                for column, pos in annotation_fields:
                    setattr(instance, column, values[pos])

            instance._state.db = db
            instance._state.adding = False

            yield instance

    def __repr__(self):
        return "<RawQuerySet: %r>" % (self.raw_query % self.params)

    def __getitem__(self, k):
        # NOTE: evaluates the entire queryset to index into it.
        return list(self)[k]

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model)

    def using(self, alias):
        """
        Selects which database this Raw QuerySet should execute its query against.
        """
        return RawQuerySet(self.raw_query, model=self.model,
                           query=self.query.clone(using=alias),
                           params=self.params, translations=self.translations,
                           using=alias)

    @property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()

            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existent column names
                    pass

        return self._columns

    @property
    def model_fields(self):
        """
        A dict mapping column names to model field names.
        """
        if not hasattr(self, '_model_fields'):
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields
def insert_query(model, values, return_id=False, raw_values=False, using=None):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented. It is not
    part of the public API.
    """
    query = sql.InsertQuery(model)
    query.insert_values(values, raw_values)
    # Returns the new row's id when return_id is True (backend permitting).
    return query.get_compiler(using=using).execute_sql(return_id)
| apache-2.0 |
sv-dev1/odoo | addons/analytic_user_function/__openerp__.py | 260 | 2015 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: evaluated by the server at module-discovery
# time; must remain a single literal dict.
{
    'name': 'Jobs on Contracts',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
This module allows you to define what is the default function of a specific user on a given account.
====================================================================================================
This is mostly used when a user encodes his timesheet: the values are retrieved
and the fields are auto-filled. But the possibility to change these values is
still available.
Obviously if no data has been recorded for the current account, the default
value is given as usual by the employee data so that this module is perfectly
compatible with older configurations.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/employees',
    # Module dependencies, data files and demo data loaded on install.
    'depends': ['hr_timesheet_sheet'],
    'data': ['analytic_user_function_view.xml', 'security/ir.model.access.csv'],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lfranchi/zulip | zerver/management/commands/rename_stream.py | 115 | 1189 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_rename_stream
from zerver.models import Realm, get_realm
import sys
class Command(BaseCommand):
    help = """Change the stream name for a realm."""

    def add_arguments(self, parser):
        # All three positional arguments share the same shape, so declare
        # them table-driven.
        for name, metavar, help_text in (
                ('domain', '<domain>', "domain to operate on"),
                ('old_name', '<old name>', 'name of stream to be renamed'),
                ('new_name', '<new name>', 'new name to rename the stream to')):
            parser.add_argument(name, metavar=metavar, type=str, help=help_text)

    def handle(self, *args, **options):
        """Rename a stream within the realm identified by `domain`."""
        fs_encoding = sys.getfilesystemencoding()
        domain = options['domain']
        try:
            realm = get_realm(domain)
        except Realm.DoesNotExist:
            print("Unknown domain %s" % (domain,))
            exit(1)
        do_rename_stream(realm,
                         options['old_name'].decode(fs_encoding),
                         options['new_name'].decode(fs_encoding))
| apache-2.0 |
LukeM12/samba | lib/subunit/python/subunit/tests/test_progress_model.py | 83 | 3902 | #
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
import unittest
import subunit
from subunit.progress_model import ProgressModel
class TestProgressModel(unittest.TestCase):
    """Unit tests for subunit.progress_model.ProgressModel."""

    def assertProgressSummary(self, pos, total, progress):
        """Assert that a progress model has reached a particular point."""
        self.assertEqual(pos, progress.pos())
        self.assertEqual(total, progress.width())

    def test_new_progress_0_0(self):
        # A fresh model has no position and no width.
        progress = ProgressModel()
        self.assertProgressSummary(0, 0, progress)

    def test_advance_0_0(self):
        progress = ProgressModel()
        progress.advance()
        self.assertProgressSummary(1, 0, progress)

    def test_advance_1_0(self):
        progress = ProgressModel()
        progress.advance()
        self.assertProgressSummary(1, 0, progress)

    def test_set_width_absolute(self):
        progress = ProgressModel()
        progress.set_width(10)
        self.assertProgressSummary(0, 10, progress)

    def test_set_width_absolute_preserves_pos(self):
        progress = ProgressModel()
        progress.advance()
        progress.set_width(2)
        self.assertProgressSummary(1, 2, progress)

    def test_adjust_width(self):
        # Width adjustments are relative and reversible.
        progress = ProgressModel()
        progress.adjust_width(10)
        self.assertProgressSummary(0, 10, progress)
        progress.adjust_width(-10)
        self.assertProgressSummary(0, 0, progress)

    def test_adjust_width_preserves_pos(self):
        progress = ProgressModel()
        progress.advance()
        progress.adjust_width(10)
        self.assertProgressSummary(1, 10, progress)
        progress.adjust_width(-10)
        self.assertProgressSummary(1, 0, progress)

    def test_push_preserves_progress(self):
        # push() starts a nested progress scope without losing the outer one.
        progress = ProgressModel()
        progress.adjust_width(3)
        progress.advance()
        progress.push()
        self.assertProgressSummary(1, 3, progress)

    def test_advance_advances_substack(self):
        progress = ProgressModel()
        progress.adjust_width(3)
        progress.advance()
        progress.push()
        progress.adjust_width(1)
        progress.advance()
        self.assertProgressSummary(2, 3, progress)

    def test_adjust_width_adjusts_substack(self):
        progress = ProgressModel()
        progress.adjust_width(3)
        progress.advance()
        progress.push()
        progress.adjust_width(2)
        progress.advance()
        self.assertProgressSummary(3, 6, progress)

    def test_set_width_adjusts_substack(self):
        progress = ProgressModel()
        progress.adjust_width(3)
        progress.advance()
        progress.push()
        progress.set_width(2)
        progress.advance()
        self.assertProgressSummary(3, 6, progress)

    def test_pop_restores_progress(self):
        # pop() discards the nested scope's contribution entirely.
        progress = ProgressModel()
        progress.adjust_width(3)
        progress.advance()
        progress.push()
        progress.adjust_width(1)
        progress.advance()
        progress.pop()
        self.assertProgressSummary(1, 3, progress)
def test_suite():
    """Build the suite of all tests defined in this module."""
    loader = subunit.tests.TestUtil.TestLoader()
    return loader.loadTestsFromName(__name__)
| gpl-3.0 |
sigmavirus24/pip | pip/_vendor/distlib/scripts.py | 333 | 15224 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
# Windows side-by-side assembly manifest written next to generated
# executables ('%s' is filled with the executable's base name).
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()

# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')

# Source template for generated wrapper scripts; '%(module)s'/'%(func)s'
# are filled from the export entry.
# NOTE(review): the indentation inside this template appears to have been
# lost in this copy of the file -- verify against upstream distlib before
# relying on the generated script source.
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
    """
    Wrap an interpreter path containing spaces in double quotes so it is
    usable in a shebang line.  For '/usr/bin/env <interpreter>' forms only
    the interpreter part is quoted; already-quoted paths are left alone.
    """
    if ' ' not in executable:
        return executable
    if executable.startswith('/usr/bin/env '):
        # Quote only the interpreter, e.g.
        #   /usr/bin/env "/dir with spaces/bin/jython"
        # rather than quoting the whole string.
        env, interpreter = executable.split(' ', 1)
        if ' ' in interpreter and not interpreter.startswith('"'):
            return '%s "%s"' % (env, interpreter)
        return executable
    if executable.startswith('"'):
        return executable
    return '"%s"' % executable
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
             dry_run=False, fileop=None):
    self.source_dir = source_dir
    self.target_dir = target_dir
    # Whether to wrap scripts in .exe launchers (only effective on Windows).
    self.add_launchers = add_launchers
    self.force = False
    # Overwrite existing target files only when clobber is True.
    self.clobber = False
    # It only makes sense to set mode bits on POSIX.
    self.set_mode = (os.name == 'posix') or (os.name == 'java' and
                                             os._name == 'posix')
    # Filename variants to produce: '' -> plain name, 'X.Y' -> name
    # suffixed with the Python version.
    self.variants = set(('', 'X.Y'))
    # All filesystem writes go through a FileOperator (honours dry_run).
    self._fileop = fileop or FileOperator(dry_run)

    self._is_nt = os.name == 'nt' or (
        os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
    """Return the windowed (pythonw) variant of `executable` when a GUI
    script is requested on Windows; otherwise return it unchanged."""
    if options.get('gui', False) and self._is_nt:  # pragma: no cover
        dirname, basename = os.path.split(executable)
        executable = os.path.join(dirname,
                                  basename.replace('python', 'pythonw'))
    return executable
if sys.platform.startswith('java'):  # pragma: no cover
    # These helpers are only defined when running under Jython.
    def _is_shell(self, executable):
        """
        Determine if the specified executable is a script
        (contains a #! line)
        """
        try:
            with open(executable) as fp:
                return fp.read(2) == '#!'
        except (OSError, IOError):
            logger.warning('Failed to open %s', executable)
            return False

    def _fix_jython_executable(self, executable):
        # Rewrite the shebang target so Jython scripts launch correctly.
        if self._is_shell(executable):
            # Workaround for Jython is not needed on Linux systems.
            import java

            if java.lang.System.getProperty('os.name') == 'Linux':
                return executable
        elif executable.lower().endswith('jython.exe'):
            # Use wrapper exe for Jython on Windows
            return executable

        # Fall back to launching the (script) executable via env.
        return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
    """
    Build the '#!' line (as bytes) for a generated script, validating that
    it is decodable both as UTF-8 and in the script's own `encoding`.
    """
    enquote = True
    if self.executable:
        # An explicitly configured interpreter is used verbatim.
        executable = self.executable
        enquote = False     # assume this will be taken care of
    elif not sysconfig.is_python_build():
        executable = get_executable()
    elif in_venv():  # pragma: no cover
        executable = os.path.join(sysconfig.get_path('scripts'),
                                  'python%s' % sysconfig.get_config_var('EXE'))
    else:  # pragma: no cover
        executable = os.path.join(
            sysconfig.get_config_var('BINDIR'),
            'python%s%s' % (sysconfig.get_config_var('VERSION'),
                            sysconfig.get_config_var('EXE')))
    if options:
        executable = self._get_alternate_executable(executable, options)

    if sys.platform.startswith('java'):  # pragma: no cover
        executable = self._fix_jython_executable(executable)
    # Normalise case for Windows
    executable = os.path.normcase(executable)
    # If the user didn't specify an executable, it may be necessary to
    # cater for executable paths with spaces (not uncommon on Windows)
    if enquote:
        executable = _enquote_executable(executable)
    # Issue #51: don't use fsencode, since we later try to
    # check that the shebang is decodable using utf-8.
    executable = executable.encode('utf-8')
    # in case of IronPython, play safe and enable frames support
    if (sys.platform == 'cli' and '-X:Frames' not in post_interp
            and '-X:FullFrames' not in post_interp):  # pragma: no cover
        post_interp += b' -X:Frames'
    shebang = b'#!' + executable + post_interp + b'\n'
    # Python parser starts to read a script using UTF-8 until
    # it gets a #coding:xxx cookie. The shebang has to be the
    # first line of a file, the #coding:xxx cookie cannot be
    # written before. So the shebang has to be decodable from
    # UTF-8.
    try:
        shebang.decode('utf-8')
    except UnicodeDecodeError:  # pragma: no cover
        raise ValueError(
            'The shebang (%r) is not decodable from utf-8' % shebang)
    # If the script is encoded to a custom encoding (use a
    # #coding:xxx cookie), the shebang has to be decodable from
    # the script encoding too.
    if encoding != 'utf-8':
        try:
            shebang.decode(encoding)
        except UnicodeDecodeError:  # pragma: no cover
            raise ValueError(
                'The shebang (%r) is not decodable '
                'from the script encoding (%r)' % (shebang, encoding))
    return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
    # Interpolate the executable's base name into the manifest template.
    return self.manifest % os.path.basename(exename)
def _write_script(self, names, shebang, script_bytes, filenames, ext):
    """
    Write *script_bytes* (prefixed by *shebang*) under each name in *names*
    in ``self.target_dir``, appending every path written to *filenames*.

    On Windows, when ``self.add_launchers`` is set, the script is embedded
    in a binary ``.exe`` launcher instead of being written as plain text.
    """
    use_launcher = self.add_launchers and self._is_nt
    linesep = os.linesep.encode('utf-8')
    if not use_launcher:
        script_bytes = shebang + linesep + script_bytes
    else:  # pragma: no cover
        # 't' is the console launcher, 'w' the GUI one (for 'pyw' scripts).
        if ext == 'py':
            launcher = self._get_launcher('t')
        else:
            launcher = self._get_launcher('w')
        # Zip the script up as __main__.py: the launcher runs the zip
        # archive appended after itself (launcher + shebang + zip data).
        stream = BytesIO()
        with ZipFile(stream, 'w') as zf:
            zf.writestr('__main__.py', script_bytes)
        zip_data = stream.getvalue()
        script_bytes = launcher + shebang + linesep + zip_data
    for name in names:
        outname = os.path.join(self.target_dir, name)
        if use_launcher:  # pragma: no cover
            # Launcher output is an .exe: drop any .py/.pyw suffix first.
            n, e = os.path.splitext(outname)
            if e.startswith('.py'):
                outname = n
            outname = '%s.exe' % outname
            try:
                self._fileop.write_binary_file(outname, script_bytes)
            except Exception:
                # Failed writing an executable - it might be in use.
                # Rename the existing file aside and retry the write.
                logger.warning('Failed to write executable - trying to '
                               'use .deleteme logic')
                dfname = '%s.deleteme' % outname
                if os.path.exists(dfname):
                    os.remove(dfname)  # Not allowed to fail here
                os.rename(outname, dfname)  # nor here
                self._fileop.write_binary_file(outname, script_bytes)
                logger.debug('Able to replace executable using '
                             '.deleteme logic')
                try:
                    os.remove(dfname)
                except Exception:
                    pass  # still in use - ignore error
        else:
            # Plain (non-launcher) script: ensure an extension on Windows.
            if self._is_nt and not outname.endswith('.' + ext):  # pragma: no cover
                outname = '%s.%s' % (outname, ext)
            if os.path.exists(outname) and not self.clobber:
                logger.warning('Skipping existing file %s', outname)
                continue
            self._fileop.write_binary_file(outname, script_bytes)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
    """
    Copy *script* from ``self.source_dir`` to ``self.target_dir``,
    rewriting its shebang line when it has one, and append the written
    path to *filenames*.
    """
    adjust = False
    script = os.path.join(self.source_dir, convert_path(script))
    outname = os.path.join(self.target_dir, os.path.basename(script))
    if not self.force and not self._fileop.newer(script, outname):
        logger.debug('not copying %s (up-to-date)', script)
        return

    # Always open the file, but ignore failures in dry-run mode --
    # that way, we'll get accurate feedback if we can read the
    # script.
    try:
        f = open(script, 'rb')
    except IOError:  # pragma: no cover
        if not self.dry_run:
            raise
        f = None
    else:
        first_line = f.readline()
        if not first_line:  # pragma: no cover
            logger.warning('%s: %s is an empty file (skipping)',
                           self.get_command_name(), script)
            return

        # A shebang-like first line means the script needs adjusting;
        # capture any interpreter arguments that followed it.
        match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
        if match:
            adjust = True
            post_interp = match.group(1) or b''

    if not adjust:
        # No shebang to rewrite: straight copy.
        if f:
            f.close()
        self._fileop.copy_file(script, outname)
        if self.set_mode:
            self._fileop.set_executable_mode([outname])
        filenames.append(outname)
    else:
        logger.info('copying and adjusting %s -> %s', script,
                    self.target_dir)
        if not self._fileop.dry_run:
            # Build a fresh shebang in the script's own encoding, then
            # write the shebang plus the rest of the file unchanged.
            encoding, lines = detect_encoding(f.readline)
            f.seek(0)
            shebang = self._get_shebang(encoding, post_interp)
            if b'pythonw' in first_line:  # pragma: no cover
                ext = 'pyw'
            else:
                ext = 'py'
            n = os.path.basename(outname)
            self._write_script([n], shebang, f.read(), filenames, ext)
        if f:
            f.close()
@property
def dry_run(self):
    # Delegated to the underlying file operator so a single flag controls
    # whether any filesystem changes are actually performed.
    return self._fileop.dry_run

@dry_run.setter
def dry_run(self, value):
    self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):  # pragma: no cover
    # Executable launcher support.
    # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/

    def _get_launcher(self, kind):
        """
        Return the launcher stub bytes for *kind* ('t' = console,
        'w' = GUI) matching the interpreter's pointer size.

        :raises ValueError: if the launcher resource cannot be found.
        """
        if struct.calcsize('P') == 8:   # 64-bit
            bits = '64'
        else:
            bits = '32'
        name = '%s%s.exe' % (kind, bits)
        # Issue 31: don't hardcode an absolute package name, but
        # determine it relative to the current package
        distlib_package = __name__.rsplit('.', 1)[0]
        resource = finder(distlib_package).find(name)
        # Fail with a clear message instead of an AttributeError on None
        # when the launcher binary is missing from the package.
        if resource is None:
            msg = ('Unable to find resource %s in package %s'
                   % (name, distlib_package))
            raise ValueError(msg)
        return resource.bytes
# Public API follows
def make(self, specification, options=None):
    """
    Make a script.

    :param specification: The specification, which is either a valid export
                          entry specification (to make a script from a
                          callable) or a filename (to make a script by
                          copying from a source location).
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    written = []
    entry = get_export_entry(specification)
    if entry is None:
        # Not an export entry: treat the specification as a source file
        # and copy it across.
        self._copy_script(specification, written)
    else:
        self._make_script(entry, written, options=options)
    return written
def make_multiple(self, specifications, options=None):
    """
    Make scripts for each of several specifications.

    :param specifications: A list of specifications.
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    return [pathname
            for specification in specifications
            for pathname in self.make(specification, options)]
| mit |
StrellaGroup/erpnext | erpnext/utilities/product.py | 9 | 4817 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, fmt_money, flt, nowdate, getdate
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item
from erpnext.stock.doctype.batch.batch import get_batch_qty
def get_qty_in_stock(item_code, item_warehouse_field, warehouse=None):
    """
    Return website stock availability for *item_code*.

    :param item_code: Item (or variant) code to look up.
    :param item_warehouse_field: Item fieldname that holds the default
        warehouse, consulted when *warehouse* is not given.
    :param warehouse: Optional explicit warehouse.
    :return: ``frappe._dict`` with ``in_stock`` (1/0), ``stock_qty``
        (SQL result rows, or '' when no warehouse could be resolved)
        and ``is_stock_item``.
    """
    in_stock, stock_qty = 0, ''
    template_item_code, is_stock_item = frappe.db.get_value("Item", item_code, ["variant_of", "is_stock_item"])

    if not warehouse:
        warehouse = frappe.db.get_value("Item", item_code, item_warehouse_field)

    if not warehouse and template_item_code and template_item_code != item_code:
        # A variant without its own warehouse falls back to the template's.
        warehouse = frappe.db.get_value("Item", template_item_code, item_warehouse_field)

    if warehouse:
        # Available qty = actual minus all reservations, clamped at zero,
        # converted to the item's sales UOM when a conversion row exists.
        stock_qty = frappe.db.sql("""
            select GREATEST(S.actual_qty - S.reserved_qty - S.reserved_qty_for_production - S.reserved_qty_for_sub_contract, 0) / IFNULL(C.conversion_factor, 1)
            from tabBin S
            inner join `tabItem` I on S.item_code = I.Item_code
            left join `tabUOM Conversion Detail` C on I.sales_uom = C.uom and C.parent = I.Item_code
            where S.item_code=%s and S.warehouse=%s""", (item_code, warehouse))

        if stock_qty:
            # Quantities sitting in expired batches are not sellable.
            stock_qty = adjust_qty_for_expired_items(item_code, stock_qty, warehouse)
            in_stock = stock_qty[0][0] > 0 and 1 or 0

    return frappe._dict({"in_stock": in_stock, "stock_qty": stock_qty, "is_stock_item": is_stock_item})
def adjust_qty_for_expired_items(item_code, stock_qty, warehouse):
    """
    Deduct quantities held in expired batches of *item_code* from
    *stock_qty* (clamped at zero) and return the adjusted rows.

    :param stock_qty: SQL result rows; the first row's first column is
        the available quantity.
    :param warehouse: When given, only that warehouse's batch qty is
        deducted; otherwise batch qty across all warehouses is used.
    """
    batch_rows = frappe.get_all('Batch', filters=[{'item': item_code}],
        fields=['expiry_date', 'name'])
    # Work on mutable copies of the result tuples.
    stock_qty = [list(row) for row in stock_qty]
    for batch_no in get_expired_batches(batch_rows):
        if warehouse:
            expired_qty = get_batch_qty(batch_no, warehouse)
        else:
            expired_qty = qty_from_all_warehouses(get_batch_qty(batch_no))
        stock_qty[0][0] = max(0, stock_qty[0][0] - expired_qty)
        if not stock_qty[0][0]:
            # Nothing left in stock - no point checking further batches.
            break
    return stock_qty
def get_expired_batches(batches):
    """
    Return the names of all batches whose expiry date has passed.

    :param batches: A list of dicts in the form
        [{'expiry_date': datetime.date(20XX, 1, 1), 'name': 'batch_id'}, ...]
    """
    today = getdate(nowdate())
    expired = []
    for batch in batches:
        if batch.expiry_date and batch.expiry_date <= today:
            expired.append(batch.name)
    return expired
def qty_from_all_warehouses(batch_info):
    """
    Sum a batch's quantity across every warehouse it is stored in.

    :param batch_info: A list of dicts in the form
        [{u'warehouse': u'Stores - I', u'qty': 0.8}, ...]
    """
    return sum(batch.qty for batch in batch_info)
def get_price(item_code, price_list, customer_group, company, qty=1):
    """
    Return the shopping-cart price row for *item_code* on *price_list*,
    with any applicable pricing rule applied and display fields added.

    :return: The Item Price row (a dict with ``price_list_rate``,
        ``currency`` and formatted display fields), or None when no
        price list is given or no price is found.
    """
    template_item_code = frappe.db.get_value("Item", item_code, "variant_of")

    if price_list:
        price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
            filters={"price_list": price_list, "item_code": item_code})

        if template_item_code and not price:
            # Variant without its own price: fall back to the template item.
            price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
                filters={"price_list": price_list, "item_code": template_item_code})

        if price:
            pricing_rule = get_pricing_rule_for_item(frappe._dict({
                "item_code": item_code,
                "qty": qty,
                "transaction_type": "selling",
                "price_list": price_list,
                "customer_group": customer_group,
                "company": company,
                "conversion_rate": 1,
                "for_shopping_cart": True,
                "currency": frappe.db.get_value("Price List", price_list, "currency")
            }))

            if pricing_rule:
                # Either a percentage discount or an outright replacement
                # rate can apply.
                if pricing_rule.pricing_rule_for == "Discount Percentage":
                    price[0].price_list_rate = flt(price[0].price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0)))

                if pricing_rule.pricing_rule_for == "Rate":
                    price[0].price_list_rate = pricing_rule.price_list_rate

            price_obj = price[0]

            if price_obj:
                price_obj["formatted_price"] = fmt_money(price_obj["price_list_rate"], currency=price_obj["currency"])

                price_obj["currency_symbol"] = not cint(frappe.db.get_default("hide_currency_symbol")) \
                    and (frappe.db.get_value("Currency", price_obj.currency, "symbol", cache=True) or price_obj.currency) \
                    or ""

                # Also show the price per sales UOM (rates are stored per
                # stock UOM) using the item's UOM conversion factor.
                uom_conversion_factor = frappe.db.sql("""select C.conversion_factor
                    from `tabUOM Conversion Detail` C
                    inner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom
                    where I.name = %s""", item_code)

                uom_conversion_factor = uom_conversion_factor[0][0] if uom_conversion_factor else 1
                price_obj["formatted_price_sales_uom"] = fmt_money(price_obj["price_list_rate"] * uom_conversion_factor, currency=price_obj["currency"])

                # Normalise falsy fields so templates don't render None.
                if not price_obj["price_list_rate"]:
                    price_obj["price_list_rate"] = 0

                if not price_obj["currency"]:
                    price_obj["currency"] = ""

                if not price_obj["formatted_price"]:
                    price_obj["formatted_price"] = ""

            return price_obj
| gpl-3.0 |
rkoschmitzky/coconodz | etc/maya/applib.py | 1 | 4161 | import pymel.core as pmc
def get_attribute_tree(node):
    """ traverses the attributes of the given node and builds the
    parent-to-children mapping the attribute tree widget expects

    Args:
        node: PyNode

    Returns: dict {parent_attr_long_name: [child_attr_long_name, ...]}

    """
    tree = {}
    for attr in node.listAttr():
        attr_name = attr.longName()
        if attr.isMulti():
            # for some reason iterDescendants is not returning the proper
            # children for multi attributes, so query children() directly
            tree[attr_name] = [child.longName() for child in attr.children()]
        elif not attr.isChild():
            # top-level (non-child, non-multi) attribute: collect all
            # of its descendants
            children = tree.setdefault(attr_name, [])
            for descendant in attr.iterDescendants():
                children.append(descendant.longName())
    return tree
def get_used_attribute_type(attribute):
    """ classifies the given attribute by its current connections

    Args:
        attribute: PyNode attribute

    Returns: "slot" if the attribute has both incoming and outgoing
    connections, "socket" if it only has incoming connections and
    "plug" otherwise

    """
    has_inputs = bool(attribute.listConnections(s=True, d=False))
    has_outputs = bool(attribute.listConnections(s=False, d=True))
    if has_inputs and has_outputs:
        return "slot"
    if has_inputs:
        return "socket"
    return "plug"
def get_connected_attributes_in_node_tree(node_or_nodes, node_types=None):
    """ gets all connected attributes in the dependency tree of the given
    node(s), together with their attribute type, node type and data type

    Args:
        node_or_nodes: node or nodes whose dependency tree will be scanned
        node_types: if specified, only attributes connected to nodes of
                    these types are included; if unspecified all connected
                    attributes are added

    Returns: dict {attribute: {"node_type": "some_node_type",
                               "data_type": "some_data_type",
                               "type": "some_attribute_type"
                               }
                   }

    """
    # find all nodes connected in tree (upstream and downstream) and
    # remove doubled entries
    tree_nodes = list(set(pmc.listHistory(node_or_nodes, f=True, ac=True) + pmc.listHistory(node_or_nodes, ac=True)))
    all_connected_attributes = []

    # checks if the attribute is a relevant attribute by checking
    # the node types of the nodes connected to it; appends relevant
    # attributes to the enclosing all_connected_attributes list
    def _check_node_type(attribute):
        if node_types:
            is_relevant = False
            if attribute.nodeType() in node_types:
                dependencies = attribute.connections(p=True)
                if dependencies:
                    for dependency in dependencies:
                        if not is_relevant and dependency.nodeType() in node_types:
                            is_relevant = True
            if is_relevant:
                all_connected_attributes.append(attribute)
        else:
            # no filter given - every connected attribute is relevant
            all_connected_attributes.append(attribute)

    # based on all nodes in tree get all related attributes,
    # do the filtering and check if the attribute is relevant
    for connection in pmc.listConnections(tree_nodes, c=True, p=True):
        source, destination = connection
        if source not in all_connected_attributes:
            _check_node_type(source)
        if destination not in all_connected_attributes:
            _check_node_type(destination)

    # subdict skeleton every value in attribute_dict should have
    subdict = {"node_type": None,
               "data_type": None,
               "type": None}

    attribute_dict = {}
    for attribute in all_connected_attributes:
        _ = subdict.copy()
        _["node_type"] = attribute.nodeType()
        _["data_type"] = attribute.type()
        _["type"] = get_used_attribute_type(attribute)
        attribute_dict[attribute.name()] = _
    return attribute_dict
def get_connections(node_or_nodes):
    """ gets all connections for a single or multiple nodes

    Args:
        node_or_nodes: node name or PyNode instance (list)

    Returns: dict {source_plug: destination_plug}

    """
    # gather the whole dependency tree (upstream and downstream) and
    # drop duplicate nodes
    history = pmc.listHistory(node_or_nodes, f=True, ac=True)
    history += pmc.listHistory(node_or_nodes, ac=True)
    tree_nodes = list(set(history))

    connections = {}
    for source, destination in pmc.listConnections(tree_nodes, c=True, p=True, s=False):
        connections[str(source)] = str(destination)
    return connections
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.