import json
import traceback
from copy import deepcopy
from alarm.config import HUPUN_SYNC_ABNORMAL_ROBOT
from alarm.helper import Helper
from alarm.page.ding_talk import DingTalk
from cookie.config import ROBOT_TOKEN
from hupun.config import HUPUN_SEARCH_DAYS
from hupun.model.es.purchase_order_close_msg import PurOrderCloseMsg
from hupun_slow_crawl.model.es.supplier import Supplier
from hupun.page.purchase_order_goods import PurchaseOrderGoods
from hupun.page.purchase_order_goods_result import PurOrderGoodsResult
from hupun.page.purchase_order_query_result import POrderQueResult
from hupun_api.page.purchase_order_close import PurchaseOrderClose
from hupun_operator.page.purchase.close_audit import CloseAudit
from mq_handler.base import Base
from pyspider.helper.date import Date
from pyspider.core.model.storage import default_storage_redis
from pyspider.helper.email import EmailSender
from pyspider.helper.string import merge_str
class PurOrderClose(Base):
"""
关闭采购跟单(采购单里的商品)
"""
ClOSE_FAIL_TIMES_KEY = 'hupun_purchase_close_fails' # redis中关闭订单失败次数的key
# 采购订单不能关闭的状态值
PUR_COMPLETE = 3 # 已完成
PUR_CLOSED = 4 # 已关闭
# 采购跟单的状态值
NOT_ARRIVED = 0 # 未到货
PARTIAL_ARRIVED = 1 # 部分到货
FULL_ARRIVED = 2 # 全部到货, 不能操作关闭
CLOSED = 3 # 已关闭, 不能操作关闭
# 关闭备注
close_remark = 'spider-close-mark'
    def execute(self):
        print('Closing purchase order')
        self.print_basic_info()
        # Fetch the incoming data
        bill_code = self._data.get('erpPurchaseNo')
        data_list = self._data.get('list')
        form_id = self._data.get('formId')
        error_msg_str = ''
        sku_barcode_list = set()  # SKUs that failed to close
        sku_barcode_success_list = set()  # SKUs that closed successfully
        sku_invalid_close_list = [self.FULL_ARRIVED, self.CLOSED]
        # Whether the purchase order was already closed externally; completed and
        # closed orders cannot be closed again. Defaults to False (not closed).
        purchase_bill_status = False
        # Whether any follow-up line failed the data comparison; True means a
        # mismatch was found, False (default) means everything matched.
        compared_pur_sku_status = False
        def send_success_msg(bill_uid):
            """
            Report the data after a successful close
            :param bill_uid:
            :return:
            """
            re_data = {
                'formId': self._data.get('formId'),
                'bill_code': bill_code,
                'skuBarcodeFailed': list(sku_barcode_list),
                'skuBarcodeSuccessed': list(sku_barcode_success_list),
            }
            self.send_call_back_msg(error_msg_str, data_id=self._data_id, status=0, return_data=re_data)
            # After a purchase order is closed normally, reset the consecutive failure count to 0
            default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, 0)
            # Persist the raw incoming data
            save_list = list()
            for _data in data_list:
                save_data = dict()
                save_data['sku_barcode'] = _data.get('skuBarcode')
                save_data['form_id'] = form_id
                save_data['bill_code'] = bill_code
                save_data['supplier_code'] = _data.get('supplierCode')
                save_data['purchase_count'] = _data.get('purchaseCount')
                save_data['arrive_count'] = _data.get('arriveCount')
                save_data['close_count'] = _data.get('closeCount')
                save_data['check_status'] = False
                save_list.append(save_data)
            # NOTE: `async` became a reserved word in Python 3.7; this keyword
            # argument only parses on older interpreters.
            PurOrderCloseMsg().update(save_list, async=True)
            # Re-crawl the data of the closed purchase order
            print('Updating purchase order: {}'.format(bill_uid))
            PurchaseOrderGoods(bill_uid) \
                .set_priority(PurchaseOrderGoods.CONST_PRIORITY_BUNDLED) \
                .use_cookie_pool() \
                .enqueue()
        print('Start fetching data')
try:
            # Read the previous consecutive failure count from redis; if it is >= 9,
            # alert and reset the value to zero.
            fail_times = default_storage_redis.get(self.CLOSE_FAIL_TIMES_KEY)
            if not fail_times:
                print('Error: could not read the fail_times value!')
                default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, 0)
                fail_times = 0
            else:
                fail_times = int(fail_times)
                if fail_times >= 9:
                    ding_msg = 'Failed to close the ERP order 10 times in a row; please check online promptly.'
                    print(ding_msg)
                    title = 'Purchase order close program warning'
                    DingTalk(ROBOT_TOKEN, title, ding_msg).enqueue()
                    fail_times = 0
                    default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, 0)
data_length = len(data_list)
for _d in data_list:
sku_barcode_list.add(_d['skuBarcode'])
            print('Synchronously fetching data for purchase order: {}'.format(bill_code))
pur_order_query_obj = POrderQueResult(bill_code) \
.set_start_time(Date.now().plus_days(-HUPUN_SEARCH_DAYS).format()) \
.use_cookie_pool()
status, result = Helper().get_sync_result(pur_order_query_obj)
if status == 1:
raise Exception(result)
if result:
result = result[0]
bill_uid = result.get('bill_uid', '')
sku_result = PurOrderGoodsResult(bill_uid) \
.use_cookie_pool() \
.get_result(retry_limit=3, retry_interval=30)
                if not sku_result:
                    err_msg = 'Purchase order: {} has no SKU-level follow-up orders crawled'.format(bill_code)
                    print(err_msg)
                    error_msg_str += ';{}'.format(err_msg)
                    print(error_msg_str)
                    data = {
                        'formId': self._data.get('formId'),
                        'bill_code': bill_code,
                        'skuBarcodeFailed': list(sku_barcode_list),
                        'skuBarcodeSuccessed': list(sku_barcode_success_list),
                    }
                    default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
                    return self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
                # Data available: fetch the SKU-level data of the purchase order
                print('Data available: fetching the SKU-level data of the purchase order')
                if len(sku_result) < data_length:
                    err_msg = 'Failed to close purchase order: {}, because {} follow-up orders were passed in, ' \
                              'more than the {} crawled'.format(bill_code, data_length, len(sku_result))
                    print(err_msg)
                    error_msg_str += ';{}'.format(err_msg)
                    print(error_msg_str)
                    data = {
                        'formId': self._data.get('formId'),
                        'bill_code': bill_code,
                        'skuBarcodeFailed': list(sku_barcode_list),
                        'skuBarcodeSuccessed': list(sku_barcode_success_list),
                    }
                    default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
                    return self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
                # Skip the purchase order if it has already been closed
                if int(result.get('status', 0)) in (self.PUR_CLOSED, self.PUR_COMPLETE):
                    print('Purchase order: {} is already closed; marking its closed status'.format(bill_code))
                    purchase_bill_status = True
                # Check whether this purchase order qualifies for a whole-bill close; True: it does
                whole_close_status = self.full_close_purchase(data_list, sku_result)
                for input_data in data_list:
                    print('skuBarcode: {}'.format(input_data.get('skuBarcode')))
                    supplier_code = input_data.get('supplierCode')
                    # Look up the supplier name
                    supplier_name = Supplier().find_supplier_name_by_code(supplier_code)
                    if not supplier_name:
                        err_msg = 'Supplier not found: {}'.format(supplier_code)
                        print(err_msg)
                        error_msg_str += ';{}'.format(err_msg)
                        print(error_msg_str)
                        sku_barcode_list.add(input_data['skuBarcode'])
                        data = {
                            'formId': self._data.get('formId'),
                            'bill_code': bill_code,
                            'skuBarcodeFailed': list(sku_barcode_list),
                            'skuBarcodeSuccessed': list(sku_barcode_success_list),
                        }
                        default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
                        return self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
                    input_data['supplierName'] = supplier_name
sku_operate_data = {}
for sku_data in sku_result:
if input_data['skuBarcode'] == sku_data['spec_code']:
sku_operate_data = sku_data
                    if not sku_operate_data:
                        err_msg = 'No follow-up order found for goods SKU: {}'.format(input_data['skuBarcode'])
                        print(err_msg)
                        error_msg_str += ';{}'.format(err_msg)
                        print(error_msg_str)
                        sku_barcode_list.add(input_data['skuBarcode'])
                        continue
                    # Compare the crawled SKU-level data with the incoming close request
                    print('Comparing the SKU-level data of the purchase order with the incoming close request for sku: {}'.format(input_data['skuBarcode']))
                    status, msg = self.compare_close_purchase(input_data, sku_operate_data)
                    if status == 1:
                        err_msg = 'Crawled data fields do not match: {};'.format(','.join(msg))
                        print(err_msg)
                        error_msg_str += '{}'.format(err_msg)
                        print(error_msg_str)
                        sku_barcode_list.add(input_data['skuBarcode'])
                        compared_pur_sku_status = True
                        continue
                    # If the purchase order was already closed externally, skip closing this follow-up order
                    if purchase_bill_status:
                        print('Purchase order already closed externally; skipping close of follow-up order: {}'.format(input_data['skuBarcode']))
                        sku_barcode_list.remove(input_data['skuBarcode'])
                        sku_barcode_success_list.add(input_data['skuBarcode'])
                        continue
                    # Skip follow-up orders that are already closed
                    if int(sku_operate_data.get('status', 0)) in sku_invalid_close_list:
                        print('Follow-up order for goods sku: {} is already closed or fully arrived; skipping'.format(input_data['skuBarcode']))
                        sku_barcode_list.remove(input_data['skuBarcode'])
                        sku_barcode_success_list.add(input_data['skuBarcode'])
                        continue
                    # Before actually closing, decide between a whole-bill close and a line-by-line close
                    if whole_close_status:
                        sku_barcode_list.remove(input_data['skuBarcode'])
                        sku_barcode_success_list.add(input_data['skuBarcode'])
                        continue
                    # Close the corresponding SKU line
                    # Data checks passed; build the payload copies
result_copy = deepcopy(result)
result_details_copy = deepcopy(result)
result_two_details = deepcopy(sku_operate_data)
sku_result_details_copy = deepcopy(sku_result)
result_two_whole_details = []
sku_result_whole_copy = deepcopy(sku_result)
for _r in sku_result_whole_copy:
if _r['spec_code'] == sku_operate_data['spec_code']:
continue
_r['$dataType'] = "v:purchase.bill$pchs_detail"
_r['$entityId'] = "0"
result_two_whole_details.append(_r)
result_two_details['$dataType'] = "v:purchase.bill$pchs_detail"
result_two_details['$state'] = 2
result_two_details['$entityId'] = "0"
result_two_details['$oldData'] = sku_operate_data
expecte_date = result_two_details['expecte_date']
result_two_details['expecte_date'] = expecte_date + 'T00:00:00Z' if isinstance(
expecte_date, str) and expecte_date else expecte_date
print('result_two_details: {}'.format(result_two_details['expecte_date']))
result_two_details['status'] = 3
result_copy['$dataType'] = "v:purchase.bill$purchaseBill"
result_copy['$state'] = 2
result_copy['$entityId'] = "0"
result_copy['bill_date'] = result_copy['bill_date'] + 'T00:00:00Z'
print('result_copy:{}'.format(result_copy['bill_date']))
result_copy['details'] = {
"$isWrapper": True,
"$dataType": "v:purchase.bill$[pchs_detail]",
"data": [result_two_details] + result_two_whole_details
}
result_details_copy['details'] = sku_result_details_copy
result_copy['$oldData'] = result_details_copy
sku_re_copy = deepcopy(sku_operate_data)
sku_re_copy['$dataType'] = "v:purchase.bill$pchs_detail"
sku_re_copy['$state'] = 2
sku_re_copy['$entityId'] = "0"
sku_re_date = sku_re_copy['expecte_date']
sku_re_copy['expecte_date'] = sku_re_date + 'T00:00:00Z' if isinstance(
sku_re_date, str) and sku_re_date else sku_re_date
print('sku_re_copy: {}'.format(sku_re_copy['expecte_date']))
sku_re_copy['status'] = 3
sku_re_copy['$oldData'] = sku_operate_data
                    # Start the close
close_msg = self.close_purchase_bill(bill_code, level=2, bill_data=result_copy,
close_remark=self.close_remark, bill_sku_data=sku_re_copy)
                    if 'error' in close_msg:
                        err_msg = 'Failed to close the follow-up order for goods SKU: {}, reason: {}'.format(input_data['skuBarcode'], close_msg)
                        error_msg_str += ';{}'.format(err_msg)
                        print(error_msg_str)
                    else:
                        print('Close succeeded, skuBarcode: {}'.format(input_data['skuBarcode']))
                        sku_barcode_list.remove(input_data['skuBarcode'])
                        sku_barcode_success_list.add(input_data['skuBarcode'])
                # Decide whether to close the whole bill
                if not purchase_bill_status and not compared_pur_sku_status and whole_close_status:
                    print('Purchase order: {} can be closed as a whole bill'.format(bill_code))
                    return_skubarcode = set()
                    for _d in data_list:
                        return_skubarcode.add(_d['skuBarcode'])
                    close_msg = self.close_purchase_bill(bill_code=bill_code, close_remark=self.close_remark)
                    if close_msg:
                        err_msg = 'Whole-bill close of purchase order: {} failed, reason: {};'.format(bill_code, close_msg)
                        error_msg_str += err_msg
                        print(error_msg_str)
                        sku_barcode_list = return_skubarcode
                        sku_barcode_success_list.clear()
                    else:
                        print('Whole-bill close of purchase order: {} succeeded'.format(bill_code))
                        sku_barcode_list.clear()
                        sku_barcode_success_list = return_skubarcode
data = {
'formId': self._data.get('formId'),
'bill_code': bill_code,
'skuBarcodeFailed': list(sku_barcode_list),
'skuBarcodeSuccessed': list(sku_barcode_success_list),
}
print('error_msg_str: {}'.format(error_msg_str))
                if error_msg_str:
                    self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
                    default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
                else:
                    # Report the successful close
                    send_success_msg(bill_uid)
            else:
                # No data: raise an alarm
                err_msg = 'No information was fetched for the purchase order being closed: {}'.format(bill_code)
                print(err_msg)
                error_msg_str += ';{}'.format(err_msg)
                print(error_msg_str)
                data = {
                    'formId': self._data.get('formId'),
                    'bill_code': bill_code,
                    'skuBarcodeFailed': list(sku_barcode_list),
                    'skuBarcodeSuccessed': list(sku_barcode_success_list),
                }
                default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
                self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
        except Exception as e:
            print('----')
            print(traceback.format_exc())
            print('------')
            err_msg = 'Unknown exception while closing the purchase order: {}; erpPurchaseNo: {}'.format(e, self._data.get('erpPurchaseNo'))
            print(err_msg)
            error_msg_str += ';{}'.format(err_msg)
            print(error_msg_str)
            data = {
                'formId': self._data.get('formId'),
                'bill_code': bill_code,
                'skuBarcodeFailed': list(sku_barcode_list),
                'skuBarcodeSuccessed': list(sku_barcode_success_list),
            }
            self.send_call_back_msg(error_msg_str, data_id=self._data_id, return_data=data)
            default_storage_redis.set(self.CLOSE_FAIL_TIMES_KEY, fail_times + 1)
            raise
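    # Shape of the incoming MQ payload consumed by execute() (field names are
    # taken from the .get() calls above; the values are illustrative only):
    # {
    #     "erpPurchaseNo": "CG20190001",
    #     "formId": "123",
    #     "email": "ops@example.com",
    #     "list": [{"skuBarcode": "SKU-A", "supplierCode": "S01",
    #               "purchaseCount": 5, "arriveCount": 2, "closeCount": 3}]
    # }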
    def close_purchase_bill(self, bill_code='', level=1, bill_data='', bill_sku_data='', close_remark='', retry=3):
        """
        Close a purchase order by its bill code;
        this closes the first-level purchase order (first level meaning the panel
        information returned by querying Hupun directly)
        :param bill_code:
        :param level: 1, close the whole bill; 2, partially close goods lines inside the bill
        :param bill_data:
        :param bill_sku_data:
        :param close_remark: close remark
        :param retry: retry count
        :return: level 1 returns None on success or an error message string;
                 level 2 returns the CloseAudit result (callers treat any string
                 containing 'error' as a failure)
        """
try:
if level == 1:
result = PurchaseOrderClose() \
.set_param('bill_code', bill_code) \
.set_param('close_remark', close_remark) \
.get_result()
result = json.loads(result)
code = result.get('code')
if code != 0:
if retry > 0:
return self.close_purchase_bill(bill_code, level, bill_data, bill_sku_data, close_remark,
retry - 1)
                    else:
                        # Send a DingTalk notification about the failed close
                        title = 'Purchase order close failure alarm'
                        text = 'Closing the purchase order failed {} times; please close the abnormal order manually in Hupun: {}'.format(retry, bill_code)
                        DingTalk(ROBOT_TOKEN, title, text).enqueue()
                        return result.get('message')
else:
result = CloseAudit(bill_data, bill_sku_data, remark=close_remark) \
.use_cookie_pool() \
.get_result(retry_limit=3, retry_interval=30)
return result
except Exception as e:
print('--------error traceback--------')
print(traceback.format_exc())
print('close_purchase_bill close bill error: {}'.format(e))
if retry > 0:
return self.close_purchase_bill(bill_code, level, bill_data, bill_sku_data, close_remark, retry - 1)
            else:
                # Send a DingTalk notification about the failed close
                title = 'Purchase order close failure alarm'
                text = 'Closing the purchase order failed {} times; please close the abnormal order manually in Hupun: {}'.format(retry, bill_code)
                DingTalk(ROBOT_TOKEN, title, text).enqueue()
                return 'error: failed to close the purchase order, retries left: {}'.format(retry)
    def compare_close_purchase(self, input_dict, crawl_sku_dict):
        """
        Compare whether the two dicts carry the same data
        :param input_dict: the incoming close request line
        :param crawl_sku_dict: the crawled SKU-level follow-up order
        :return: (1, [mismatched field names]) on mismatch, else (0, '')
        """
        lack_data = list()
        if input_dict['skuBarcode'] != crawl_sku_dict.get('spec_code'):
            lack_data.append('skuBarcode')
        if input_dict['purchaseCount'] != int(crawl_sku_dict.get('pchs_size')):
            lack_data.append('purchaseCount')
        if input_dict['arriveCount'] != int(crawl_sku_dict.get('pchs_receive')):
            lack_data.append('arriveCount')
        if lack_data:
            return 1, lack_data
        else:
            return 0, ''
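    # Example (hypothetical values): input {'skuBarcode': 'A', 'purchaseCount': 5,
    # 'arriveCount': 2} against crawled {'spec_code': 'A', 'pchs_size': '5',
    # 'pchs_receive': '3'} returns (1, ['arriveCount']).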
    def full_close_purchase(self, data_list: list, sku_result: list):
        """
        Decide whether the whole bill can be closed
        :param data_list: the incoming follow-up order lines
        :param sku_result: all goods lines found for the purchase order
        :return:
        """
        if len(data_list) >= len(sku_result):
            # The request covers at least every goods line of the purchase order,
            # so the whole bill can certainly be closed
            print('{} follow-up orders passed in, {} goods lines found on the purchase order; whole-bill close is possible.'.format(len(data_list), len(sku_result)))
            return True
        whole_close = True
        # Collect the follow-up orders common to both sides
        same_items = set()
        for data in data_list:
            barcode = data.get('skuBarcode')
            for sku in sku_result:
                sku_spec_code = sku['spec_code']
                if barcode == sku_spec_code:
                    same_items.add(barcode)
        # Check that every goods line outside the request is unclosable; if any of
        # them could still be closed, a whole-bill close is not allowed
        same_items_list = list(same_items)
        for _sku in sku_result:
            if _sku['spec_code'] in same_items_list:
                continue
            sku_status = int(_sku['status'])
            if sku_status != self.FULL_ARRIVED and sku_status != self.CLOSED:
                whole_close = False
        return whole_close
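    # A minimal illustration of the decision above (hypothetical data):
    #   data_list  = [{'skuBarcode': 'A'}]
    #   sku_result = [{'spec_code': 'A', 'status': 0},
    #                 {'spec_code': 'B', 'status': 3}]  # B already CLOSED
    #   -> True: every line outside the request is closed or fully arrived.
    #   sku_result = [{'spec_code': 'A', 'status': 0},
    #                 {'spec_code': 'B', 'status': 0}]  # B still NOT_ARRIVED
    #   -> False: an open line outside the request blocks a whole-bill close.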
    def send_call_back_msg(self, err_msg='', data_id='', status=1, return_data=''):
        """
        Publish the callback message
        :return:
        """
        return_msg = {
            "code": status,  # 0: success, 1: failure
            "errMsg": err_msg,  # if code is 1, carry the concrete failure reason
            "data": return_data
        }
        # On failure, send the alarm notifications
        if status != 0 and Helper.in_project_env():
            title = 'Hupun purchase follow-up order close failure'
            ding_msg = 'Hupun follow-up order close failure reason: {}, Tiange follow-up order no.: {}, Hupun purchase order no.: {}'.format(
                err_msg, merge_str('CGGD', self._data.get('formId'), dividing=''), return_data.get('bill_code'))
            DingTalk(HUPUN_SYNC_ABNORMAL_ROBOT, title, ding_msg).enqueue()
            # Also notify by email
            if self._data.get('email'):
                EmailSender() \
                    .set_receiver(self._data.get('email')) \
                    .set_mail_title(title) \
                    .set_mail_content(ding_msg) \
                    .send_email()
        print('Publishing callback message: {}'.format(return_msg))
from mq_handler import CONST_MESSAGE_TAG_PURCHARSE_CLOSE_RE
from mq_handler import CONST_ACTION_UPDATE
from pyspider.libs.mq import MQ
msg_tag = CONST_MESSAGE_TAG_PURCHARSE_CLOSE_RE
return_date = Date.now().format()
MQ().publish_message(msg_tag, return_msg, data_id, return_date, CONST_ACTION_UPDATE)
|
#!/usr/bin/python3
import xml.etree.ElementTree as et
from optparse import OptionParser
client_nodes = [
'node9', 'node10', 'node11', 'node12',
'node13', 'node14', 'node15', 'node16',
]
parser = OptionParser()
parser.add_option("-n", "--clients_per_server", dest="clients_per_server",
help="the number of clients on each client machine",
default=1, metavar="CPS")
(options, args) = parser.parse_args()
cps = int(options.clients_per_server)
conf = et.parse('config/tpcc/pdi_deptran_tmpl.xml')
clients = conf.find('clients')
# generate client ids for each node
for i, node in enumerate(client_nodes):
if cps == 1:
cli_se = et.SubElement(clients, 'client',
{"id":"{:02}".format(i),"threads":"1"})
else:
cli_se = et.SubElement(clients, 'client',
{"id":"{:02}-{:02}".format(cps * i, cps * (i + 1) - 1),"threads":"1"})
cli_se.text = node
# update the total number of clients
clients.set("number", "{}".format(cps * len(client_nodes)))
conf.write('new.xml')
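# Illustrative fragment of the generated new.xml (hypothetical values, shown
# for --clients_per_server=2; each node gets a range of cps client ids):
#   <client id="00-01" threads="1">node9</client>
#   <client id="02-03" threads="1">node10</client>
#   ...
#   <clients number="16" ...>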
|
from past.types import basestring
class PropFind:
def __init__(self, matches):
self.matches = matches
self.search = None
def clear(self):
search = list()
for match in self.matches:
d = dict()
d["values"] = match
d["found"] = False
d["props"] = []
search.append(d)
self.search = search
def found(self):
for match in self.search:
if not match["found"]:
return False
return True
@staticmethod
def sub_props(props):
result = dict()
for key in props:
if key != "listOptions":
result[key] = props[key]
return result
@staticmethod
def match_up(prop_value, match_value, criteria):
if not isinstance(prop_value, basestring):
return False
if not isinstance(match_value, basestring):
return False
value = prop_value.lower()
match_value = match_value.lower()
if criteria == "equal":
return value == match_value
return match_value in value
@staticmethod
def sub_match(match):
result = dict()
for key in match:
if key != "criteria":
result[key] = match[key]
return result
@staticmethod
def sub_match_any_prop(props, match, criteria):
for key in props:
if PropFind.match_up(props[key], match["*"], criteria):
return True
return False
@staticmethod
def sub_match_all_prop(props, match, criteria):
for key in PropFind.sub_match(match):
if not PropFind.match_up(props[key], match[key], criteria):
return False
return True
@staticmethod
def sub_match_prop(props, match):
criteria = None
if "criteria" in match.keys():
criteria = match["criteria"]
if "*" in match.keys():
return PropFind.sub_match_any_prop(props, match, criteria)
else:
return PropFind.sub_match_all_prop(props, match, criteria)
@staticmethod
def match_prop(props, match):
for sub_match in match:
if not PropFind.sub_match_prop(props, sub_match):
return False
return True
@staticmethod
def prop_matched(props, matched):
for m in matched:
if m["name"] == props["name"] and m["defaultValue"] == props["defaultValue"]:
return True
return False
def find(self, props):
if "model" not in props.keys() or props["model"] != "string":
return
props = PropFind.sub_props(props)
for match in self.search:
if self.match_prop(props, match["values"]):
match["found"] = True
if not self.prop_matched(props, match["props"]):
match["props"].append(props)
|
# DADSA - Assignment 1
# Reece Benson
class Menu():
# Define the variables we will be using
menu = None
def __init__(self):
#TODO: Define our Menu
self.menu = {
'Item 1': {
'Sub Item 1': self.load_action,
'Sub Item 2': self.load_action
},
'Item 2': {
'Sub Item 1': self.load_action,
'Sub Item 2': self.load_action,
'Sub Item 3': self.load_action,
'Sub Item 4': self.load_action
},
'Item 3': self.load_action,
'Item 4': {
'Sub Item 1': {
'Sub Sub Item 1': self.load_action,
'Sub Sub Item 2': self.load_action
}
}
}
# Display our menu
self.display()
def display(self):
#TODO: Display the current Menu
print("Display Stuff")
def get_input(self):
#TODO: Get user's input from defined menu
print("Get Input")
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action")
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""class for tests in ceilometer/alarm/evaluator/__init__.py
"""
import datetime
import mock
import pytz
from ceilometer.alarm import evaluator
from ceilometer.openstack.common import test
from ceilometer.openstack.common import timeutils
class TestEvaluatorBaseClass(test.BaseTestCase):
def setUp(self):
super(TestEvaluatorBaseClass, self).setUp()
self.called = False
def _notify(self, alarm, previous, reason, details):
self.called = True
raise Exception('Boom!')
def test_base_refresh(self):
notifier = mock.MagicMock()
notifier.notify = self._notify
class EvaluatorSub(evaluator.Evaluator):
def evaluate(self, alarm):
pass
ev = EvaluatorSub(notifier)
ev.api_client = mock.MagicMock()
ev._refresh(mock.MagicMock(), mock.MagicMock(),
mock.MagicMock(), mock.MagicMock())
self.assertTrue(self.called)
def test_base_time_constraints(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
'start': '0 11 * * *', # daily at 11:00
'duration': 10800, # 3 hours
'timezone': ''},
{'name': 'test2',
'description': 'test',
'start': '0 23 * * *', # daily at 23:00
'duration': 10800, # 3 hours
'timezone': ''},
]
cls = evaluator.Evaluator
timeutils.set_time_override(datetime.datetime(2014, 1, 1, 12, 0, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 2, 1, 0, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 2, 5, 0, 0))
self.assertFalse(cls.within_time_constraint(alarm))
def test_base_time_constraints_complex(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
# Every consecutive 2 minutes (from the 3rd to the 57th) past
# every consecutive 2 hours (between 3:00 and 12:59) on every day.
'start': '3-57/2 3-12/2 * * *',
'duration': 30,
'timezone': ''}
]
cls = evaluator.Evaluator
# test minutes inside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 3, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 57, 0))
self.assertTrue(cls.within_time_constraint(alarm))
# test minutes outside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 2, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 4, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 58, 0))
self.assertFalse(cls.within_time_constraint(alarm))
# test hours inside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 5, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 11, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
# test hours outside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 1, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 4, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 12, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
def test_base_time_constraints_timezone(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
'start': '0 11 * * *', # daily at 11:00
'duration': 10800, # 3 hours
'timezone': 'Europe/Ljubljana'}
]
cls = evaluator.Evaluator
dt_eu = datetime.datetime(2014, 1, 1, 12, 0, 0,
tzinfo=pytz.timezone('Europe/Ljubljana'))
dt_us = datetime.datetime(2014, 1, 1, 12, 0, 0,
tzinfo=pytz.timezone('US/Eastern'))
timeutils.set_time_override(dt_eu.astimezone(pytz.UTC))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(dt_us.astimezone(pytz.UTC))
self.assertFalse(cls.within_time_constraint(alarm))
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
"""
Exception definitions.
"""
class CommandError(Exception):
pass
class ValidationError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token."""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class EmptyCatalog(Exception):
""" The service catalog is empty. """
pass
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
def __init__(self, code, message=None, details=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
def __str__(self):
return "%s (HTTP %s)" % (self.message, self.code)
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class Conflict(ClientException):
"""
HTTP 409 - Conflict
"""
http_status = 409
message = "Conflict"
class OverLimit(ClientException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
class ServiceUnavailable(ClientException):
"""
HTTP 503 - Service Unavailable: The server is currently unavailable.
"""
http_status = 503
message = "Service Unavailable"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [BadRequest,
Unauthorized,
Forbidden,
NotFound,
OverLimit,
HTTPNotImplemented,
ServiceUnavailable])
def from_response(response, body):
"""
    Return an instance of a ClientException or subclass
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status != 200:
raise exception_from_response(resp, body)
"""
cls = _code_map.get(response.status, ClientException)
    if body:
        if hasattr(body, 'keys'):
            # dict.keys() is not indexable on Python 3; materialize it first
            error = body[list(body.keys())[0]]
            message = error.get('message', None)
            details = error.get('details', None)
else:
# If we didn't get back a properly formed error message we
# probably couldn't communicate with Keystone at all.
message = "Unable to communicate with identity service: %s." % body
details = None
return cls(code=response.status, message=message, details=details)
else:
return cls(code=response.status)
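# For example, a 404 response whose body is
#   {"itemNotFound": {"message": "Not found", "details": "..."}}
# maps to NotFound(code=404, message="Not found", details="...").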
|
# -*- coding: utf-8 -*-
"""
Base class for the forward model operator
"""
import numpy as np
import copy
from shutil import rmtree
import os
import subprocess
import tensorflow as tf
import time
class FclassError(Exception):
pass
class Subprocess_op:
"""Class to create a Tensorflow operator from a program called through
subprocess.
"""
    def __init__(self, F, dataid, m, name=None):
        """Create a Subprocess_op
        Args:
            F: A Forward object describing how to launch the program
            dataid: A placeholder inputting the ids of the data to be used
                (could also contain the actual data)
            m: A list of tensors containing the model parameters that can be
                optimized (not all members of m need to actually be optimized
                in a run)
            name: A name to be given to this op
        """
self.F=copy.copy(F)
self.dataid=dataid
self.m=m
self.name=name
self._op=None
self._opH=None
self.workdir='./scratch'+''.join([str(k) for k in np.random.randint(0,9,15) ])+'/'
@property
def workdir(self):
return self.__workdir
@workdir.setter
def workdir(self, workdir):
"""Create a unique working directory for this op
Args:
workdir: unique name of the directory. If empty directory is given,
clean the previous working dir if it exist
"""
if hasattr(self,'workdir') and os.path.exists(self.workdir):
rmtree(self.workdir)
self.__workdir=workdir
if workdir and not os.path.exists(workdir):
os.makedirs(workdir)
@property
def opH(self):
if not self._opH:
workdir=self.workdir
read_Hessian=self.F.read_Hessian
output=[]
# Actual gradient:
for m in self.m:
def _Hessian(param_name):
try:
name = [param_name.decode('ascii')]
output = np.array(read_Hessian(workdir, name)[0],
dtype=m.dtype.as_numpy_dtype)
# output= np.array( read_Hessian(workdir, names),
# dtype=np.float64 )
except (FclassError, OSError) as msg:
raise FclassError(msg)
# raise tf.errors.AbortedError(None,None,msg)
return output
# Build the Hessian operator
vstring= m.name.split(':')[0]
output.append(tf.py_func(_Hessian,
[vstring],
[m.dtype],
stateful=True,
name=vstring)[0])
self._opH = output
return self._opH
@property
def op(self):
"""Create the op on a first call, then only returns it
"""
if not self._op:
            # No references to outside objects are permitted inside the custom
            # op functions _Forward and _Adjoint, so copy the references here.
workdir=self.workdir
set_forward = self.F.set_forward
set_backward = self.F.set_backward
callcmd=self.F.callcmd(self.workdir)
input_residuals=self.F.input_residuals
read_data=self.F.read_data
read_rms=self.F.read_rms
# write_residuals=self.F.write_residuals
write_residuals=None
read_grad=self.F.read_grad
# Actual forward:
def _Forward( dataid, m, param_names):
            # Try to launch the forward code
nmax=2
n=0
success=False
msg=''
while n<nmax and not success:
try:
names = [name.decode('ascii') for name in param_names]
set_forward(dataid,
dict(zip(names, m)),
workdir,
withgrad=not input_residuals)
pipes = subprocess.Popen(callcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
date, std_err = pipes.communicate()
if std_err:
print(std_err.decode())
raise FclassError(std_err.decode())
if input_residuals:
output = np.float32(read_data(workdir))
else:
output = np.float32(read_rms(workdir))
success = True
except (FclassError, OSError) as msg:
n += 1
if n == nmax:
raise FclassError(msg)
# raise tf.errors.AbortedError(None,None,msg)
return output
# Actual gradient:
def _Adjoint_fun( m, param_names, residuals):
try:
names = [name.decode('ascii') for name in param_names]
if input_residuals:
set_backward(workdir, residuals)
pipes = subprocess.Popen(callcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
date, std_err = pipes.communicate()
if std_err:
print(std_err.decode())
raise FclassError(std_err.decode())
output = np.array(read_grad(workdir, names),
dtype=np.float64)
except (FclassError, OSError) as msg:
raise FclassError(msg)
return output
            # The gradient function can only take and return tensor objects. To
            # evaluate the gradient and output a numpy array, we must wrap the
            # gradient function in another py_func. When the graph is built,
            # functions inside py_func are not evaluated, whereas the grad
            # function passed to tf.RegisterGradient is. The latter is a problem
            # because no data is transmitted while the graph is initializing, so
            # subprocess calls or any operation on tensor values would fail.
def _Adjoint(op, grad ):
return op.inputs[0], tf.py_func(_Adjoint_fun, [op.inputs[1], op.inputs[2], grad], tf.float64 ), op.inputs[2]
param_names=[t.name.split(':')[0] for t in self.m]
for param in self.F.params:
if param not in param_names:
raise tf.errors.InvalidArgumentError(None, None,'This op require param %s to be defined in m'%param)
# Build the Forward operator overriding the gradient with our own function
with tf.name_scope(self.name, "Forward", [self.dataid,self.m, param_names]) as name:
# Need to generate a unique name to avoid duplicates:
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(_Adjoint)
with tf.get_default_graph().gradient_override_map({"PyFunc": rnd_name}):
self._op=tf.py_func(_Forward, [self.dataid,self.m, param_names], [tf.float32], stateful=True, name=name)
return self._op
    def __del__(self):
        # Setting an empty name makes the workdir setter remove the directory.
        self.workdir = ''
class Forward:
"""Base class for Forward model operators.
This class defines the API to add Ops to call an outside program for data
and gradient . You never use this class directly, but instead instantiate
one of its subclasses such as `SeisCL`.
### Usage
```python
# Instantiate a forward object with data and parameters.
F = SeisCL()
dataid=tf.placeholder(dtype=tf.int16)
m=[tf.get_variable(name='vp', initializer=vp0),
tf.get_variable(name='vs', initializer=vs0),
tf.get_variable(name='rho', initializer=rho0)
]
    # Add two Ops to the graph, each with their own working directory.
    # Each op can be used as a standard op within tensorflow
op1=F.op()
op2=F.op()
```
You can then begin the inversion with Tensorflow training (assuming op1
output the rms value)
```python
# Use training algorithms of Tensorflow:
costfun = tf.losses.mean_squared_error(0, op1, weights=1.0 )
opt = tf.train.GradientDescentOptimizer(10.0)
```
"""
def __init__(self):
"""Create a new Forward model.
This must be overloaded by the constructors of subclasses.
"""
self.params=[] #list of required model parameters
self.input_residuals=False
    def set_forward(self, jobids, params, workdir):
        """Sets any files or parameters before executing the forward pass of
        the modeling program
        Args:
            jobids: The handle to the batch of data (or the data itself)
            params: A dict mapping parameter name to a numpy array of model parameters
            workdir: The working directory in which to prepare the run
        Returns:
            Void
        """
raise NotImplementedError()
    def set_backward(self, workdir, residuals):
        """Sets any files or parameters before executing the backward pass of
        the modeling program (signature inferred from the call in
        Subprocess_op.op above)
        Args:
            workdir: The working directory in which to prepare the run
            residuals: The residuals to back-propagate
        Returns:
            Void
        """
        raise NotImplementedError()
def callcmd(self,workdir, os='Darwin'):
"""This function should return the command to call the Forward model
through subprocess
Args:
workdir: The directory from which the command should be called
os: String with the os name, if call command changes with the os
Returns:
A string containing the command to be called by subprocess
"""
raise NotImplementedError()
def read_data(self,workdir):
"""This function should read the data and output it in a numpy array
Args:
workdir: The directory of the data file
Returns:
A list of numpy arrays containing the data
"""
raise NotImplementedError()
def read_grad(self, workdir, param_names):
"""This function should read the gradient and output it in a dictionary
of numpy array
Args:
workdir: The directory of the data file
param_names: A list containing strings with the name of variables
for which to load the gradient
Returns:
A list of numpy arrays containing the gradients
"""
raise NotImplementedError()
def read_Hessian(self, workdir, param_names):
"""This function should read the diagonal approximate Hessian and
output it in a dictionary of numpy array
Args:
workdir: The directory of the data file
param_names: A list containing strings with the name of variables
for which to load the gradient
Returns:
A list of numpy arrays containing the Hessians
"""
raise NotImplementedError()
def read_rms(self, workdir):
"""This function should read the objective function value and output it
along a normalization factor
Args:
workdir: The directory of the data file
Returns:
A tuple with (rms_value, rms_norm) for the batch of data
"""
raise NotImplementedError()
    def op(self, dataid, m, name=None):
        """Returns an instance of Subprocess_op whose `op` attribute is a
        Tensorflow operator for the forward computation, with the gradient
        attached
        Args:
            dataid: A placeholder inputting the ids of the data to be used
                (could also contain the actual data)
            m: A list of tensors containing the model parameters that can be
                optimized (not all members of m need to actually be optimized
                in a run)
            name: A name to be given to this op
        Returns:
            A Subprocess_op instance with an attribute op giving an operator
            that can output the data and compute the gradient for the
            function called through subprocess
        """
        return Subprocess_op(self, dataid, m, name=name)
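# A minimal illustrative subclass (hypothetical file names and command; real
# subclasses such as SeisCL implement their own I/O against their own codes):
class EchoForward(Forward):
    def __init__(self):
        super().__init__()
        self.params = ['vp']  # require a single parameter tensor named 'vp'
    def callcmd(self, workdir, os='Darwin'):
        # the forward program is assumed to write rms.npy and grad_vp.npy
        return 'cd %s && ./run_forward' % workdir
    def set_forward(self, jobids, params, workdir, withgrad=True):
        np.save(os.path.join(workdir, 'model_vp.npy'), params['vp'])
    def read_rms(self, workdir):
        return np.load(os.path.join(workdir, 'rms.npy'))
    def read_grad(self, workdir, param_names):
        return [np.load(os.path.join(workdir, 'grad_%s.npy' % n))
                for n in param_names]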
|
try:
with open('VERSION') as f:
version = f.read().strip()
except Exception:
version = '0.0.1'
__version__ = version
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" System tests that obtain the set of available packages for the current platform
It then installs each of those plugin packages via REST API endpoints
"""
import os
import subprocess
import http.client
import json
import pytest
import py
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2019 Dianomic Systems Inc."
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
available_pkg = []
counter = 4  # by default 4 plugins are installed, i.e. all north plugins
errors = []
@pytest.fixture
def reset_packages():
try:
subprocess.run(["$FOGLAMP_ROOT/tests/system/python/scripts/package/remove"], shell=True, check=True)
except subprocess.CalledProcessError:
assert False, "remove package script failed"
@pytest.fixture
def setup_package(package_build_version):
try:
subprocess.run(["$FOGLAMP_ROOT/tests/system/python/scripts/package/setup {}".format(package_build_version)],
shell=True, check=True)
except subprocess.CalledProcessError:
assert False, "setup package script failed"
def load_data_from_json():
_dir = os.path.dirname(os.path.realpath(__file__))
file_path = py.path.local(_dir).join('/').join('data/package_list.json')
with open(str(file_path)) as data_file:
json_data = json.load(data_file)
return json_data
class TestPackages:
def test_reset_and_setup(self, reset_packages, setup_package):
# TODO: Remove this workaround
# Use better setup & teardown methods
pass
def test_ping(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", '/foglamp/ping')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found"
assert 1 < jdoc['uptime']
assert isinstance(jdoc['uptime'], int)
assert 0 == jdoc['dataRead']
assert 0 == jdoc['dataSent']
assert 0 == jdoc['dataPurged']
assert 'FogLAMP' == jdoc['serviceName']
assert 'green' == jdoc['health']
assert jdoc['authenticationOptional'] is True
assert jdoc['safeMode'] is False
def test_available_plugin_packages(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", '/foglamp/plugins/available')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found"
global available_pkg
plugins = available_pkg = jdoc['plugins']
assert len(plugins), "No plugin found"
assert 'link' in jdoc
assert 'foglamp-filter-python35' in plugins
assert 'foglamp-north-http-north' in plugins
assert 'foglamp-north-kafka' in plugins
assert 'foglamp-notify-python35' in plugins
assert 'foglamp-rule-outofbound' in plugins
assert 'foglamp-south-modbus' in plugins
assert 'foglamp-south-playback' in plugins
def test_available_service_packages(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
conn.request("GET", '/foglamp/service/available')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found"
assert 1 == len(jdoc['services'])
assert 'foglamp-service-notification' == jdoc['services'][0]
assert 'link' in jdoc
def test_install_service_package(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
data = {"format": "repository", "name": "foglamp-service-notification"}
conn.request("POST", '/foglamp/service?action=install', json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert 'message' in jdoc
assert 'link' in jdoc
assert '{} is successfully installed'.format(data['name']) == jdoc['message']
# verify service installed
conn.request("GET", '/foglamp/service/installed')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found"
assert 3 == len(jdoc['services'])
assert 'notification' in jdoc['services']
def test_install_plugin_package(self, foglamp_url, package_build_source_list, package_build_list):
# FIXME: FOGL-3276 Remove once we have dedicated RPi with sensehat device attached
# otherwise its discovery fails
if 'foglamp-south-sensehat' in available_pkg:
available_pkg.remove('foglamp-south-sensehat')
# When "package_build_source_list" is true then it will install all available packages
# Otherwise install from list as we defined in JSON file
if package_build_source_list.lower() == 'true':
for pkg_name in available_pkg:
self._verify_and_install_package(foglamp_url, pkg_name)
assert not errors, "Package errors have occurred: \n {}".format("\n".join(errors))
else:
json_data = load_data_from_json()
# If 'all' in 'package_build_list' then it will iterate each key in JSON file
if 'all' in package_build_list:
package_build_list = ",".join(json_data.keys())
my_list = package_build_list.split(",")
for pkg_list_cat in my_list:
for k, pkg_list_name in json_data[pkg_list_cat][0].items():
for pkg_name in pkg_list_name:
full_pkg_name = 'foglamp-{}-{}'.format(k, pkg_name)
if full_pkg_name in available_pkg:
self._verify_and_install_package(foglamp_url, full_pkg_name)
else:
print("{} not found in available package list".format(full_pkg_name))
assert not errors, "Package errors have occurred: \n {}".format("\n".join(errors))
def _verify_and_install_package(self, foglamp_url, pkg_name):
print("Installing %s package" % pkg_name)
global counter
global errors
conn = http.client.HTTPConnection(foglamp_url)
data = {"format": "repository", "name": pkg_name}
conn.request("POST", '/foglamp/plugins', json.dumps(data))
r = conn.getresponse()
# assert 200 == r.status
if r.status != 200:
print("POST Install plugin failed due to %s while attempting %s" % (r.reason, pkg_name))
errors.append("POST Install plugin failed due to {} while attempting {}".format(r.reason, pkg_name))
return
r = r.read().decode()
jdoc = json.loads(r)
assert '{} is successfully installed'.format(pkg_name) == jdoc['message']
assert 'link' in jdoc
        # Special case: installing the flirax8 package pulls in the modbus package
        # as a dependency. The available package list is always alphabetically
        # sorted, so modbus has not been installed on its own yet; drop it from
        # the list and count the extra installed plugin.
if pkg_name == 'foglamp-south-flirax8':
available_pkg.remove('foglamp-south-modbus')
counter += 1
counter += 1
conn.request("GET", '/foglamp/plugins/installed')
r = conn.getresponse()
# assert 200 == r.status
if r.status != 200:
print("GET Plugins installed failed due to %s while attempting %s" % (r.reason, pkg_name))
errors.append("GET Plugins installed failed due to {} while attempting {}".format(r.reason, pkg_name))
counter -= 1
return
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found"
# assert counter == len(jdoc['plugins'])
if counter != len(jdoc['plugins']):
print("Error in discovery of %s package" % pkg_name)
errors.append("{} package discovery failed".format(pkg_name))
counter -= 1
|
"""class URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
import nova.network
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
try:
os_network_opts = [
cfg.BoolOpt("enable_network_quota",
default=False,
help="Enables or disables quotaing of tenant networks"),
cfg.StrOpt('use_quantum_default_nets',
default="False",
help=('Control for checking for default networks')),
cfg.StrOpt('quantum_default_tenant_id',
default="default",
help=('Default tenant id when creating quantum '
'networks'))
]
CONF.register_opts(os_network_opts)
except cfg.DuplicateOptError:
# NOTE(jkoelker) These options are verbatim elsewhere this is here
# to make sure they are registered for our use.
pass
if CONF.enable_network_quota:
opts = [
cfg.IntOpt('quota_networks',
default=3,
help='number of private networks allowed per project'),
]
CONF.register_opts(opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
return {"id": network.get("uuid") or network["id"],
"cidr": network["cidr"],
"label": network["label"]}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_quantum_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception("Failed to get default networks")
def _get_default_networks(self):
project_id = CONF.quantum_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = self.network_api.get_all(context)
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
return network_dict(network)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_("Failed to update usages deallocating "
"network."))
LOG.info(_("Deleting network with id %s") % id)
try:
self.network_api.delete(context, id)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
response = exc.HTTPAccepted()
except exception.NetworkNotFound:
response = exc.HTTPNotFound(_("Network not found"))
return response
def create(self, req, body):
if not body:
raise exc.HTTPUnprocessableEntity()
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class Os_tenant_networks(extensions.ExtensionDescriptor):
"""Tenant-based Network Management Extension."""
name = "OSTenantNetworks"
alias = "os-tenant-networks"
namespace = ("http://docs.openstack.org/compute/"
"ext/os-tenant-networks/api/v2")
updated = "2012-03-07T09:46:43-05:00"
def get_resources(self):
ext = extensions.ResourceExtension('os-tenant-networks',
NetworkController())
return [ext]
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
|
""" Abstract Object Detection, Each model implementation has to follow this
apis
"""
import abc
from typing import Tuple, Dict
import numpy as np
from PIL import Image
class AbstractObjectDetection(
metaclass=abc.ABCMeta
): # pylint: disable=missing-class-docstring
@abc.abstractmethod
def get_objects(
self, image_np: np.array, image: Image
) -> Tuple[Dict, object]:
"""
Return the object detection data from model inference pipeline.
Ensure the bbox coordinates are xmin, ymin, xmax, ymax format, and
renormalized to get actual bounding box coordinates.
Return:
{
"detection_classes": [int, int],
"detection_boxes": Nx4 Tensor. The coordinates are renormalized.,
"detection_scores": [float, float]
}
"""
pass # pylint: disable=unnecessary-pass
@abc.abstractmethod
def get_bboxes(self, image_path: str, img_pipeline=None):
"""
Deprecated method, soon be removed, added for backward compatibility
"""
raise NotImplementedError("get_bboxes method isn't defined")
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSyncGroupResult',
'AwaitableGetSyncGroupResult',
'get_sync_group',
]
@pulumi.output_type
class GetSyncGroupResult:
"""
Sync Group object.
"""
def __init__(__self__, id=None, name=None, sync_group_status=None, type=None, unique_id=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sync_group_status and not isinstance(sync_group_status, str):
raise TypeError("Expected argument 'sync_group_status' to be a str")
pulumi.set(__self__, "sync_group_status", sync_group_status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="syncGroupStatus")
def sync_group_status(self) -> str:
"""
Sync group status
"""
return pulumi.get(self, "sync_group_status")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> Optional[str]:
"""
Unique Id
"""
return pulumi.get(self, "unique_id")
class AwaitableGetSyncGroupResult(GetSyncGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSyncGroupResult(
id=self.id,
name=self.name,
sync_group_status=self.sync_group_status,
type=self.type,
unique_id=self.unique_id)
def get_sync_group(resource_group_name: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
sync_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncGroupResult:
"""
Sync Group object.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
:param str sync_group_name: Name of Sync Group resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['storageSyncServiceName'] = storage_sync_service_name
__args__['syncGroupName'] = sync_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20190201:getSyncGroup', __args__, opts=opts, typ=GetSyncGroupResult).value
return AwaitableGetSyncGroupResult(
id=__ret__.id,
name=__ret__.name,
sync_group_status=__ret__.sync_group_status,
type=__ret__.type,
unique_id=__ret__.unique_id)
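# Example invocation (hypothetical resource names):
#   result = get_sync_group(resource_group_name='rg1',
#                           storage_sync_service_name='mysyncservice',
#                           sync_group_name='mysyncgroup')
#   pulumi.export('syncGroupStatus', result.sync_group_status)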
|
i=0
k=0
def setup():
size(500, 500)
smooth()
strokeWeight(1)
background(0)
def draw():
    global i, k
    stroke(i, 20)
    line(mouseX, mouseY, random(0, 500), 500)
    i += k
    if i == 255:
        k = -1
    if i == 0:
        k = 1
def keyPressed():
    if key == "s":
        saveFrame("myProcessing.png")
|
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_200spin/eco",
graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
    # SET UP ENVIRONMENT AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 200,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run()
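# Usage sketch: run() can also be pointed at another validation set, e.g.
# (the .pkl path below is hypothetical and must exist on disk):
#
#     run(save_loc="ER_200spin/eco",
#         graph_save_loc="_graphs/validation/another_graph_set.pkl",
#         batched=True,
#         max_batch_size=25)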
|
from os import system
system('echo Hello')  # 'Hello' alone is not a shell command; echo it instead
|
"""empty message
Revision ID: c763cdb7a96c
Revises: 03a541062ae3
Create Date: 2017-11-28 03:13:37.858152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c763cdb7a96c'
down_revision = '03a541062ae3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('is_admin', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'is_admin')
# ### end Alembic commands ###
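# Note (a sketch, not part of this revision): if is_admin should be NOT NULL,
# existing rows need a server default to satisfy the constraint, e.g.:
#
#     op.add_column('users', sa.Column('is_admin', sa.Boolean(),
#                                      nullable=False,
#                                      server_default=sa.false()))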
|
from pirates.piratesgui.RadarGui import *
from pirates.quest.QuestIndicatorNode import QuestIndicatorNode
from direct.showbase.PythonUtil import report, StackTrace
class QuestIndicatorNodeArea(QuestIndicatorNode):
def __init__(self, questStep):
self.pendingStepObj = None
        QuestIndicatorNode.__init__(self, 'AreaIndicator', [100], questStep)
self.wantBottomEffect = False
return
def delete(self):
if self.pendingStepObj:
base.cr.relatedObjectMgr.abortRequest(self.pendingStepObj)
self.pendingStepObj = None
QuestIndicatorNode.delete(self)
return
def placeInWorld(self):
def stepObjHere(stepObj):
self.pendingStepObj = None
self.reparentTo(stepObj)
self.setPos(0, 0, 1000)
self.setHpr(0, 0, 0)
return
if self.pendingStepObj:
base.cr.relatedObjectMgr.abortRequest(self.pendingStepObj)
self.pendingStepObj = None
self.pendingStepObj = base.cr.relatedObjectMgr.requestObjects([self.questStep.getStepDoId()], eachCallback=stepObjHere)
return
def loadZoneLevel(self, level):
QuestIndicatorNode.loadZoneLevel(self, level)
if level == 0:
self.request('At')
if level == 1:
self.request('Far')
def unloadZoneLevel(self, level):
QuestIndicatorNode.unloadZoneLevel(self, level)
if level == 0:
self.request('Far')
if level == 1:
self.request('Off')
def enterAt(self):
pass
def exitAt(self):
pass
def enterFar(self):
QuestIndicatorNode.enterFar(self)
self.requestTargetRefresh()
def exitFar(self):
QuestIndicatorNode.exitFar(self)
self.stopTargetRefresh()
|
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
def oozie(is_server=False):  # TODO: see if we can remove this parameter
import params
if is_server:
params.HdfsDirectory(params.oozie_hdfs_user_dir,
action="create",
owner=params.oozie_user,
mode=params.oozie_hdfs_user_mode
)
Directory( params.conf_dir,
create_parents = True,
owner = params.oozie_user,
group = params.user_group
)
XmlConfig( "oozie-site.xml",
conf_dir = params.conf_dir,
configurations = params.config['configurations']['oozie-site'],
configuration_attributes=params.config['configurationAttributes']['oozie-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0664
)
File(format("{conf_dir}/oozie-env.sh"),
owner=params.oozie_user,
content=InlineTemplate(params.oozie_env_sh_template)
)
if params.security_enabled:
tomcat_conf_dir = format("{tomcat_conf_secure}")
else:
tomcat_conf_dir = format("{tomcat_conf}")
File(format("{tomcat_conf_dir}/catalina.properties"),
content = Template("catalina.properties.j2"),
owner = params.oozie_user,
group = params.user_group,
mode = 0755
)
  if params.log4j_props is not None:
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user
)
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
curl -kf -x \"\" \
--retry 5 {jdk_location}/{check_db_connection_jar_name}\
-o {check_db_connection_jar_name}'"),
not_if = format("[ -f {check_db_connection_jar} ]"),
environment=environment
)
  oozie_ownership()
  if is_server:
    oozie_server_specific()
def oozie_ownership():
import params
File ( format("{conf_dir}/adminusers.txt"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/hadoop-config.xml"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/oozie-default.xml"),
owner = params.oozie_user,
group = params.user_group
)
Directory ( format("{conf_dir}/action-conf"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/action-conf/hive.xml"),
owner = params.oozie_user,
group = params.user_group
)
def oozie_server_specific():
import params
  File(params.pid_file,
    action="delete",
    not_if=format("ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)")
  )
  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
  Directory( oozie_server_directories,
owner = params.oozie_user,
mode = 0755,
create_parents = True
)
cmd1 = "sh"
if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
cmd1 += format(" && cp {jdbc_driver_jar} {oozie_lib_dir}")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
Execute( [cmd1],
not_if = no_op_test
)
# the version of hadoop-auth jar files in bigtop 0.8 oozie is wrong
def correct_hadoop_auth_jar_files():
hadoop_auth_jar_file = "/usr/lib/hadoop/hadoop-auth-2.4.1.jar"
if not os.path.exists(hadoop_auth_jar_file):
raise Fail("Could not find %s" % (hadoop_auth_jar_file))
commands = ' '.join(
(
"if [ -f /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar ];",
"then",
"rm -rf /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar;",
"cp " + hadoop_auth_jar_file + " /usr/lib/oozie/lib;",
"fi"
)
)
Execute(commands)
commands = ' '.join(
(
"if [ -f /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar ];",
"then",
"rm -rf /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar;",
"cp " + hadoop_auth_jar_file + " /usr/lib/oozie/libtools;",
"fi"
)
)
Execute(commands)
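# Call-site sketch (assumes a `params` module is importable, as above):
#
#   oozie(is_server=True)   # server layout: HDFS user dir plus server-only dirs/files
#   oozie(is_server=False)  # client-only configuration files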
|
from copy import deepcopy
import numpy as np
from rlcard.games.blackjack import Dealer
from rlcard.games.blackjack import Player
from rlcard.games.blackjack import Judger
class BlackjackGame(object):
def __init__(self, allow_step_back=False):
''' Initialize the class Blackjack Game
'''
self.allow_step_back = allow_step_back
self.np_random = np.random.RandomState()
def configure(self, game_config):
        ''' Specify some game-specific parameters, such as the number of players
'''
self.player_num = game_config['game_player_num']
def init_game(self):
        ''' Initialize the game
Returns:
state (dict): the first state of the game
player_id (int): current player's id
'''
self.dealer = Dealer(self.np_random)
self.players = []
for i in range(self.player_num):
self.players.append(Player(i, self.np_random))
self.judger = Judger(self.np_random)
for i in range(2):
for j in range(self.player_num):
self.dealer.deal_card(self.players[j])
self.dealer.deal_card(self.dealer)
for i in range(self.player_num):
self.players[i].status, self.players[i].score = self.judger.judge_round(self.players[i])
self.dealer.status, self.dealer.score = self.judger.judge_round(self.dealer)
self.winner = {'dealer': 0}
for i in range(self.player_num):
self.winner['player' + str(i)] = 0
self.history = []
self.game_pointer = 0
return self.get_state(self.game_pointer), self.game_pointer
def step(self, action):
''' Get the next state
Args:
action (str): a specific action of blackjack. (Hit or Stand)
        Returns:
            dict: next player's state
            int: next player's id
'''
if self.allow_step_back:
p = deepcopy(self.players[self.game_pointer])
d = deepcopy(self.dealer)
w = deepcopy(self.winner)
self.history.append((d, p, w))
next_state = {}
# Play hit
if action != "stand":
self.dealer.deal_card(self.players[self.game_pointer])
self.players[self.game_pointer].status, self.players[self.game_pointer].score = self.judger.judge_round(
self.players[self.game_pointer])
if self.players[self.game_pointer].status == 'bust':
# game over, set up the winner, print out dealer's hand
self.judger.judge_game(self, self.game_pointer)
elif action == "stand":
while self.judger.judge_score(self.dealer.hand) < 17:
self.dealer.deal_card(self.dealer)
self.dealer.status, self.dealer.score = self.judger.judge_round(self.dealer)
self.players[self.game_pointer].status, self.players[self.game_pointer].score = self.judger.judge_round(
self.players[self.game_pointer])
self.judger.judge_game(self, self.game_pointer)
hand = [card.get_index() for card in self.players[self.game_pointer].hand]
if self.is_over():
dealer_hand = [card.get_index() for card in self.dealer.hand]
else:
dealer_hand = [card.get_index() for card in self.dealer.hand[1:]]
for i in range(self.player_num):
next_state['player' + str(i) + ' hand'] = [card.get_index() for card in self.players[i].hand]
next_state['dealer hand'] = dealer_hand
next_state['actions'] = ('hit', 'stand')
next_state['state'] = (hand, dealer_hand)
if self.game_pointer >= self.player_num - 1:
self.game_pointer = 0
else:
self.game_pointer += 1
return next_state, self.game_pointer
def step_back(self):
''' Return to the previous state of the game
Returns:
Status (bool): check if the step back is success or not
'''
#while len(self.history) > 0:
if len(self.history) > 0:
self.dealer, self.players[self.game_pointer], self.winner = self.history.pop()
return True
return False
def get_player_num(self):
''' Return the number of players in blackjack
Returns:
            number_of_player (int): the number of players in the game
'''
return self.player_num
@staticmethod
def get_action_num():
''' Return the number of applicable actions
Returns:
number_of_actions (int): there are only two actions (hit and stand)
'''
return 2
def get_player_id(self):
''' Return the current player's id
Returns:
player_id (int): current player's id
'''
return self.game_pointer
def get_state(self, player_id):
''' Return player's state
Args:
player_id (int): player id
Returns:
state (dict): corresponding player's state
'''
        '''
        Before this change, state had only two keys (action, state); now it has more
        (action, state, player0 hand, player1 hand, ..., dealer hand).
        The 'state' key duplicates information in 'player hand' and 'dealer hand', but
        removing it would also require changing other code (e.g. the DQN agent).
        '''
state = {}
state['actions'] = ('hit', 'stand')
hand = [card.get_index() for card in self.players[player_id].hand]
if self.is_over():
dealer_hand = [card.get_index() for card in self.dealer.hand]
else:
dealer_hand = [card.get_index() for card in self.dealer.hand[1:]]
for i in range(self.player_num):
state['player' + str(i) + ' hand'] = [card.get_index() for card in self.players[i].hand]
state['dealer hand'] = dealer_hand
state['state'] = (hand, dealer_hand)
return state
def is_over(self):
''' Check if the game is over
Returns:
status (bool): True/False
'''
        '''
        Note: keep this in sync with the judger and with how self.winner is set.
        '''
for i in range(self.player_num):
if self.winner['player' + str(i)] == 0:
return False
return True
##########################################################
# # For testing
# def _start_game(self):
# while True:
# self.init_game()
# player = self.player.get_player_id()
# #state = self.get_state(player)
# action = ['hit', 'stand']
# while not self.is_over():
# act = random.choice(action)
# print("Status(Player, Dealer): ",(self.player.status, self.dealer.status))
# print("Score(Player, Dealer): ",(self.player.score, self.dealer.score))
# print("Player_action:",act)
# next_state, next_player = self.step(act)
#
# print("Status(Player, Dealer): ",(self.player.status, self.dealer.status))
# print("Score(Player, Dealer): ",(self.player.score, self.dealer.score))
# print(self.winner)
# if self.dealer.score < 17 and self.winner['dealer'] == 1 and self.winner['player'] == 0:
# print(next_state)
# break
#
#if __name__ == "__main__":
# game = BlackjackGame()
# game._start_game()
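# Minimal usage sketch (assumes the rlcard imports above resolve):
#
#     import random
#     game = BlackjackGame()
#     game.configure({'game_player_num': 1})
#     state, player_id = game.init_game()
#     while not game.is_over():
#         state, player_id = game.step(random.choice(state['actions']))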
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, '../')
import asyncio
import configparser
import discord
from mafia.game import Game
from mafia.game import Player
from mafiaclients.discord.messenger import Messenger
client = discord.Client()
games = {}
players = {}
class DiscordPlayer(Player):
def __init__(self, member):
nickname = member.nick if member.nick else member.name
super(DiscordPlayer, self).__init__(member, nickname)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('----------')
@client.event
async def on_message(message):
print(message.content)
if message.content.startswith('!hi'):
await client.send_message(message.author, "Hi!")
if message.content.startswith('!game'):
await new_game(message)
if message.content.startswith('!join'):
await join(message)
if message.content.startswith('!vote'):
await vote(message)
if message.content.startswith('!target'):
await target(message)
async def new_game(message):
if message.channel.is_private:
await client.send_message(
message.channel,
'Why are you trying to create a game in a private channel?')
return
if message.channel in games and games[message.channel].phase:
await client.send_message(
message.channel,
            ('Please wait until the current game is over '
             'before starting a new one.'))
return
game_setup = message.content[len('!game'):].strip()
if Game.is_valid_setup(game_setup):
games[message.channel] = Game(
game_setup, Messenger(client, message.channel))
else:
for role in Game.unrecognized_roles(game_setup):
await client.send_message(
message.channel,
'{role} is not a recognized role.'.format(role=role))
async def join(message):
if message.channel.is_private:
await client.send_message(
message.author,
'Please join in the same channel as the game.')
return
if message.channel not in games:
await client.send_message(
message.channel,
'Start a new game with \'!game\'.')
return
player = message.author
if games[message.channel].join(DiscordPlayer(player)):
players[player.name] = message.channel
else:
print('Signups are currently closed.')
async def vote(message):
if message.channel.is_private:
await client.send_message(
message.author,
'Please send votes through the public channel.')
return
if message.channel not in games:
await client.send_message(
message.channel,
'Start a new game with \'!game\'.')
return
player_name = message.author.name
target_name = message.content[len('!vote'):].strip()
games[message.channel].vote(player_name, target_name)
async def target(message):
if not message.channel.is_private:
await client.send_message(
message.author,
'Pst... You might want to send night messages privately.')
return
if message.author.name not in players:
await client.send_message(
message.author,
'Either join a game or start a new game using \'!game\'.')
return
player_name = message.author.name
channel = players[player_name]
target_name = message.content[len("!target"):].strip()
if target_name:
        games[channel].target(player_name, target_name)  #assumed: the Game object handles night targets; the original referenced an undefined name `player`
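# config.cfg is assumed to look like (placeholder token):
#
#     [OAuth2]
#     token = YOUR_BOT_TOKEN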
config = configparser.ConfigParser()
config.read('config.cfg')
client.run(config.get('OAuth2', 'token'))
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s11_gen3.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_type11")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
"""2017-11-28 18:08:00"""
|
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the README.md for more information: https://github.com/alexholcombe/attentional-blink/blob/master/README.md
#git remote add origin https://github.com/alexholcombe/attentional-blink.git
from __future__ import print_function
import time, sys, os#, pylab
if sys.platform != 'darwin': #os.name is 'posix' on macOS, so test sys.platform instead of os.name
    from psychopy import parallel
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
from copy import deepcopy
import string
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try: import stringResponse
except ImportError: print('Could not import stringResponse.py (you need that file to be in the same directory)')
try: import letterLineupResponse
except ImportError: print('Could not import letterLineupResponse.py (you need that file to be in the same directory)')
descendingPsycho = True
## SET UP polly attempt ##
# Experiment variables
send_triggers = False
p_started = False #Whether we've started the experiment (used to determine whether to do a dummy trial after show_exit_screen)
p_port = 0xc050
p_targetTriggervalue = 99 #this is the code that is sent when the target is presented
p_stimuliCodes = range(1,27) #these are the codes sent for stimulus 1, stimulus 2, etc.
p_startTrialSingleLeft= 100 # codes that are sent at start of trial to give information about the trial
p_startTrialSingleRight= 120
p_startTrialDualLeft= 200
p_startTrialDualRight= 220
p_resp0Lcorrect=110#codes to label the responses (full definitions found here https://docs.google.com/spreadsheets/d/1DFz8UYBf-cw3UojR7Va5u7hPOHZE05YQPRT8jkmjnmg/edit#gid=0)
p_resp0Lincorrect=103
p_resp0Lapprox_1=101
p_resp0Lapprox_2=102
p_resp0Lapprox1=111
p_resp0Lapprox2=112
p_resp0Rcorrect=130
p_resp0Rincorrect=123
p_resp0Rapprox_1=121
p_resp0Rapprox_2=122
p_resp0Rapprox1=131
p_resp0Rapprox2=132
p_resp1Lcorrect=210
p_resp1Lincorrect=203
p_resp1Lapprox_1=201
p_resp1Lapprox_2=202
p_resp1Lapprox1=211
p_resp1Lapprox2=212
p_resp1Rcorrect=230
p_resp1Rincorrect=223
p_resp1Rapprox_1=221
p_resp1Rapprox_2=222
p_resp1Rapprox1=231
p_resp1Rapprox2=232
p_wait = 0.001
#Whether AB task is run, dual stream, or both, is determined by setup of conditions. Search for trialHandler
tasks=['T1','T1T2']; task = tasks[1]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=True
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=False
autoLogging=False
if demo:
refreshRate = 60.; #100 LN: refresh rate for previous AB and RSVP task for gamers was 60
# Open parallel port and make sure it is at 0
if send_triggers:
p_port = parallel.ParallelPort(address=p_port)
p_port.setData(0)
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
threshCriterion = 0.58
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 2.5 #6 deg, as in Martini E2 Letters should have height of 2.5 deg
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 1024 #800 #monitor height in pixels
monitorwidth = 40.5 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=False #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 400; heightPix = 400
monitorwidth = 13.0
fullscr=False; scrn=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':False, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate': 60 }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='AB or dualstream experiment OR staircase to find thresh noise level for T1 performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
numLettersToPresent = 26
SOAms = 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numLettersToPresent*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 1.00 #to use if no staircase, can be set by user
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
#myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
#dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText("Note: to abort press ESC at a trial's response screen", color='DimGrey') #color names not working in certain old versions of Psychopy
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
            logging.info('staircaseTrials entered by user=' + str(staircaseTrials))
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
            logging.info('prefaceStaircaseTrialsN entered by user=' + str(prefaceStaircaseTrialsN))
else: #not doing staircase
#trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
#print('trialsPerCondition=',trialsPerCondition)
        defaultNoiseLevel = float(thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ]) #float, so fractional noise percentages survive (int would truncate)
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
    logging.console.setLevel(logging.ERROR) #only show messages of this level and higher on the console (DEBUG would receive nearly all messages, then INFO, EXP, DATA, WARNING, ERROR)
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
#create click sound for keyboard
try:
click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *1.0 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=10,units='pix',autoLog=autoLogging)
respPromptStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb', color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.7),colorSpace='rgb', color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb', color=(1,1,0),alignHoriz='center', alignVert='center',height=.16,units='norm',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb', color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb', color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
#SETTING THE CONDITIONS
#For the optional attentional blink
doAB= False
if doAB:
stimListAB = []
trialsPerConditionAB = 1 #4 #default value
possibleCue1positions = np.array([7,9,11,13,15,17]) #used in Martini E2, group 2 lizzy double check this
possibleCue2lags = np.array([1,2,3,4,6,10]) # np.array([1,2,5,8,10]) for VGP: 1,2,3,4,6,10
for cue1pos in possibleCue1positions:
for cue2lag in possibleCue2lags:
stimListAB.append( {'targetLeftRightIfOne':'left','numStreams':1, 'task':'T1T2', 'cue1pos':cue1pos, 'cue2lag':cue2lag } ) #Charlie (28/6): same changes as other version of code. First two keys in the dict are new
    trialsAB = data.TrialHandler(stimListAB,trialsPerConditionAB) #constant stimuli method
    trialsForPossibleStaircase = data.TrialHandler(stimListAB,trialsPerConditionAB) #independent randomization, just to create random trials for staircase phase
#For the dual-stream simultaneous target
stimListDualStream=[]
possibleCuePositions = np.array([7,9,11,13,15,17])
for task in ['T1','T1T2']: #T1 task is just for the single-target tasks, but both streams are presented
for targetLeftRightIfOne in ['left','right']: #If single target, should it be on the left or the right?
for cuesPos in possibleCuePositions:
for firstRespLRifTwo in ['left','right']: #If dual target and lineup response, should left one or right one be queried first?
stimListDualStream.append(
{'numStreams':2, 'task':task, 'targetLeftRightIfOne':targetLeftRightIfOne, 'cue1pos':cuesPos, 'firstRespLRifTwo': firstRespLRifTwo, 'cue2lag':0 }
) #cue2lag = 0, meaning simultaneous targets
trialsPerConditionDualStream = 1 #10 #max(1, trialsAB.nTotal / len(stimListDualStream) )
trialsDualStream = data.TrialHandler(stimListDualStream,trialsPerConditionDualStream) #constant stimuli method
logging.info( ' each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
' ms' )
def numberToLetter(number): #0 = A, 25 = Z
#if it's not really a letter, return @
#if type(number) != type(5) and type(number) != type(np.array([3])[0]): #not an integer or numpy.int32
# return ('@')
if number < 0 or number > 25:
return ('@')
else: #it's probably a letter
try:
return chr( ord('A')+number )
except:
return('@')
def letterToNumber(letter): #A = 0, Z = 25
#if it's not really a letter, return -999
    #HOW CAN I GENERICALLY TEST FOR LENGTH, EVEN IN THE CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?
try:
#if len(letter) > 1:
# return (-999)
if letter < 'A' or letter > 'Z':
return (-999)
else: #it's a letter
return ord(letter)-ord('A')
except:
return (-999)
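#quick sanity checks for the two converters above (illustrative, assumed behaviour):
# numberToLetter(0) -> 'A'; numberToLetter(25) -> 'Z'; numberToLetter(26) -> '@'
# letterToNumber('A') -> 0; letterToNumber('Z') -> 25; letterToNumber('4') -> -999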
#print header for data file
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\t',end='',file=dataFile)
if task=='T1':
numRespsWanted = 1
elif task=='T1T2':
numRespsWanted = 2
print('targetLeftRightIfOne\t',end='',file=dataFile)
for i in range(numRespsWanted):
dataFile.write('answerPos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
dataFile.write('answer'+str(i)+'\t')
dataFile.write('response'+str(i)+'\t')
dataFile.write('correct'+str(i)+'\t')
dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cues,letterSeqStream1,letterSeqStream2,cueDurFrames,letterDurFrames,ISIframes,cuesPos,
numStreams,ltrsDrawObjectsStream1,ltrsDrawObjectsStream2,
noise,noise2,proportnNoise,allFieldCoords,allFieldCoords2,numNoiseDots ):
#defining a function to draw each frame of stim.
SOAframes = letterDurFrames+ISIframes
cueFrames = cuesPos*SOAframes #cuesPos is global variable
letterN = int( np.floor(n/SOAframes) )
frameOfThisLetter = n % SOAframes #every SOAframes, new letter. frameOfThisLetter means how many frames into this letter we are. 0 means the first frame of this letter
if letterN==0 and frameOfThisLetter==0:
        #send triggers for start of trial (a stray bare reference to send_trigger_to_port was removed here)
        if send_triggers:
            if task== 'T1' and targetLeftRightIfOne=='left':
                send_trigger_to_port(p_startTrialSingleLeft)
            if task== 'T1' and targetLeftRightIfOne=='right':
                send_trigger_to_port(p_startTrialSingleRight)
            if task== 'T1T2' and firstRespLRifTwo=='left':
                send_trigger_to_port(p_startTrialDualLeft)
            if task== 'T1T2' and firstRespLRifTwo=='right':
                send_trigger_to_port(p_startTrialDualRight)
showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
#print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
thisLetterIdx = letterSeqStream1[letterN] #which letter, from A to Z (1 to 26), should be shown?
    if send_triggers and frameOfThisLetter == 0:
        #assumed intent: send this stimulus' code once, on its first frame
        #(the original tested cuesTimeToDraw[cueN] before either name was defined,
        # and passed p_stimuliCodes as a keyword argument instead of indexing it)
        send_trigger_to_port(p_stimuliCodes[letterN])
thisLetterIdx2 = letterSeqStream2[letterN] #which letter, from A to Z (1 to 26), should be shown?
##only send one trigger at a time?
#so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
cuesTimeToDraw = list([False])*len(cues) #if don't use this, for AB task, bg color T2 cue will be drawn on top of T1 cue
for cue in cues: #might be at same time, or different times
cue.setLineColor( bgColor )
for cueN in xrange(len(cuesPos)): #For each cue, see whether it is time to draw it
thisCueFrameStart = cueFrames[cueN]
if n>=thisCueFrameStart and n<thisCueFrameStart+cueDurFrames:
cues[cueN].setLineColor( cueColor )
cuesTimeToDraw[cueN] = True
        #send triggers if cue? -- assumed intent: fire the target trigger on the cue's first frame
        #(the original compared n against cuesPos, which index letters rather than frames, and also fired at n==0,
        # duplicating the trial-start codes sent above)
        if send_triggers and n == thisCueFrameStart:
            send_trigger_to_port(p_targetTriggervalue)
for cueN in xrange(len(cues)):
if cuesTimeToDraw[cueN] == True:
cues[cueN].draw()
if showLetter:
ltrsDrawObjectsStream1[thisLetterIdx].setColor( letterColor )
else: ltrsDrawObjectsStream1[thisLetterIdx].setColor( bgColor )
if numStreams==1:
ltrsDrawObjectsStream1[thisLetterIdx].pos = (0,0)
else: ltrsDrawObjectsStream1[thisLetterIdx].pos = (-cueOffset,0)
ltrsDrawObjectsStream1[thisLetterIdx].draw()
if numStreams==2:
if showLetter:
ltrsDrawObjectsStream2[thisLetterIdx2].setColor( letterColor )
else: ltrsDrawObjectsStream2[thisLetterIdx2].setColor( bgColor )
ltrsDrawObjectsStream2[thisLetterIdx2].draw()
    refreshNoise = False #Not recommended because it takes longer than a frame, even just to shuffle apparently. Or maybe it is the setXYs step
if proportnNoise>0 and refreshNoise:
if frameOfThisLetter ==0:
np.random.shuffle(allFieldCoords) #refresh the noise by shuffling the possible locations of noise dots
dotCoords = allFieldCoords[0:numNoiseDots] #Take the first numNoiseDots random locations to plot the dots
            cueOffsetInPix = noiseOffsetKludge*cueOffset*pixelperdegree #Because the noise coords are in pixels but the cue position is specified in deg, I must convert deg to pix
dotCoords[:,0] += cueOffsetInPix #Displace the noise to present it over the letter stream
noise.setXYs(dotCoords)
if proportnNoise>0:
noise.draw()
if numStreams==2:
noise2.draw()
return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
cues = list()
for cueN in xrange(2):
cue = visual.Circle(myWin,
radius=cueRadius,#Martini used circles with diameter of 12 deg
lineColorSpace = 'rgb',
lineColor=bgColor,
lineWidth=2.0, #in pixels
units = 'deg',
fillColorSpace = 'rgb',
fillColor=None, #beware, with convex shapes fill colors don't work
pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
interpolate=True,
autoLog=False)#this stim changes too much for autologging to be useful
cues.append(cue)
#predraw all 26 letters
ltrHeight = 3 #Martini letters were 2.5deg high
cueOffset = 6
noiseOffsetKludge = 0.0
ltrsDrawObjectsStream1 = list()
ltrsDrawObjectsStream2 = list()
for i in range(0,26): #need to add the font Sloan to computer
letterStream1 = visual.TextStim(myWin,pos=(-cueOffset,0), font= 'sloan', colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
letterStream2 = visual.TextStim(myWin,pos=(cueOffset,0), font= 'sloan', colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
letterStream1.setHeight( ltrHeight ); letterStream2.setHeight( ltrHeight )
letter = numberToLetter(i)
letterStream1.setText(letter,log=False); letterStream2.setText(letter,log=False)
letterStream1.setColor(bgColor); letterStream2.setColor(bgColor)
ltrsDrawObjectsStream1.append(letterStream1)
ltrsDrawObjectsStream2.append(letterStream2)
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *0.99 #1.0 makes noise sometimes intrude into circle
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
#check for timing problems and log them
#ts is a list of the times of the clock after each frame
interframeIntervs = np.diff(ts)*1000
#print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded the tolerance (130% of expected duration)
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong >0 and (not demo):
longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
if longFramesStr != None:
logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
if not demo:
flankingAlso=list()
for idx in idxsInterframeLong: #also print timing of one before and one after long frame
if idx-1>=0:
flankingAlso.append(idx-1)
else: flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
else: flankingAlso.append(np.NaN)
flankingAlso = np.array(flankingAlso)
            flankingAlso = flankingAlso[~np.isnan(flankingAlso)] #remove nan values (np.negative negates arithmetically; use ~ to invert booleans)
flankingAlso = flankingAlso.astype(np.integer) #cast as integers, so can use as subscripts
logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) ) #because this is not an essential error message, as previous one already indicates error
#As INFO, at least it won't fill up the console when console set to WARNING or higher
return numCasesInterframeLong
#end timing check
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
if doAB:
nTrialsCorrectT2eachLag = np.zeros(len(possibleCue2lags)); nTrialsEachLag = np.zeros(len(possibleCue2lags))
nTrialsApproxCorrectT2eachLag = np.zeros(len(possibleCue2lags));
numRightWrongEachCuepos = np.zeros([ len(possibleCue1positions), 1 ]); #summary results to print out at end
numRightWrongEachCue2lag = np.zeros([ len(possibleCue2lags), 1 ]); #summary results to print out at end
def send_trigger_to_port(trigger_value):
#trigger_value is the integer (1 to 255) that you want to send to the EEG system
#This function will set the channel to that integer for wait_time, and then set the channel back to zero (no message)
wait_time = .0004 #seconds
p_port.setData(trigger_value)
core.wait(wait_time) #wait wait_time seconds to make sure the message was received
p_port.setData(0)
def do_RSVP_stim(numStreams, task, targetLeftRightIfOne, cue1pos, cue2lag, proportnNoise,trialN):
#relies on global variables:
# logging, bgColor
#
print("numStreams = ",numStreams)
cuesPos = [] #will contain the positions of all the cues (targets) kkkkk
cuesPos.append(cue1pos)
if task=='T1T2':
cuesPos.append(cue1pos+cue2lag)
#target on only one side will be task 'T1' so only one cue
cuesPos = np.array(cuesPos)
letterSeqStream1 = np.arange(0,26)
letterSeqStream2 = np.arange(0,26)
np.random.shuffle(letterSeqStream1)
np.random.shuffle(letterSeqStream2)
while (letterSeqStream1==letterSeqStream2).any():
np.random.shuffle(letterSeqStream2)
if numStreams ==1:
if targetLeftRightIfOne=='left':
correctAnsStream1 = np.array( letterSeqStream1[cuesPos] )
elif targetLeftRightIfOne=='right':
correctAnsStream1 = np.array( letterSeqStream2[cuesPos] )
else:
print("UNEXPECTED targetLeftRightIfOne value!")
correctAnsStream2 = np.array([]) #because there is no stream 2
if numStreams ==2:
if task =='T1T2':
correctAnsStream1 = np.array(letterSeqStream1[cuesPos[0]])
correctAnsStream2 = np.array(letterSeqStream2[cuesPos[1]] )
elif task=='T1':
correctAnsStream1 = np.array( letterSeqStream1[cuesPos[0]] )
correctAnsStream2 = np.array( letterSeqStream2[cuesPos[0]] )
print("corrrectAnsStream1=",correctAnsStream1, " or ", numberToLetter(correctAnsStream1), " correctAnsStream2=",correctAnsStream2, " or ", numberToLetter(correctAnsStream2) ) #debugOFF
#set cue positions
if task=='T1':
if targetLeftRightIfOne == 'left':
cues[0].setPos([-cueOffset,0])
cues[1].setPos([999,999]) #offscreen
elif targetLeftRightIfOne == 'right':
cues[0].setPos([cueOffset,0])
cues[1].setPos([999,999]) #offscreen
if task=='T1T2':
if numStreams==1:
for cue in cues:
cue.setPos([0,0])
elif numStreams==2:
cues[0].setPos([-cueOffset,0])
cues[1].setPos([cueOffset,0])
noise = None; allFieldCoords=None; numNoiseDots=0
if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
(noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor) #for the left stream, or the only stream
(noise2,allFieldCoords2,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor) #for the right stream
#Work out how to displace the noise so it will be on top of the streams, and then displace it
        cueOffsetInPix = int(round(noiseOffsetKludge*cueOffset*pixelperdegree)) #Because the noise coords are in pixels but the cue position is specified in deg, I must convert deg to pix
#print('allFieldCoords[1:3][0]=', allFieldCoords[1:3][0])
allFieldCoords[:,0] += cueOffsetInPix #Displace the noise to present it over the letter stream
allFieldCoords2[:,0] -= cueOffsetInPix #Displace the noise to present it over the letter stream
# print('cueOffsetInPix=',cueOffsetInPix, 'allFieldCoords[1:3][0]=', allFieldCoords[1:3][0])
dotCoords = allFieldCoords[0:numNoiseDots] #Take the first numNoiseDots random locations to plot the dots
dotCoords2 = allFieldCoords2[0:numNoiseDots] #Take the first numNoiseDots random locations to plot the dots
noise.setXYs(dotCoords)
noise2.setXYs(dotCoords2)
preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
for cue in cues:
cue.setLineColor(bgColor)
preDrawStimToGreasePipeline.extend([cue])
for stim in preDrawStimToGreasePipeline:
stim.draw()
myWin.flip(); myWin.flip()
#end preparation of stimuli
core.wait(.1);
trialClock.reset()
fixatnPeriodMin = 0.3
fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 800ms and 1.3s (changed when Fahed ran outer ring ident)
ts = list(); #to store time of each drawing, to check whether skipped frames
for i in range(fixatnPeriodFrames+20): #prestim fixation interval
#if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
# fixation.draw()
#else: fixationBlank.draw()
fixationPoint.draw()
myWin.flip() #end fixation interval
#myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
t0 = trialClock.getTime()
for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
if numStreams==2:
fixationPoint.draw()
worked = oneFrameOfStim( n,cues,letterSeqStream1,letterSeqStream2,cueDurFrames,letterDurFrames,ISIframes,cuesPos,
numStreams,ltrsDrawObjectsStream1, ltrsDrawObjectsStream2,
noise,noise2,proportnNoise,allFieldCoords,allFieldCoords2,numNoiseDots) #draw letter and possibly cue and noise on top
if exportImages:
myWin.getMovieFrame(buffer='back') #for later saving
framesSaved +=1
myWin.flip()
t=trialClock.getTime()-t0; ts.append(t);
#end of big stimulus loop
myWin.setRecordFrameIntervals(False);
if task=='T1':
respPromptStim.setText('Which letter was circled?',log=False)
elif task=='T1T2':
respPromptStim.setText('Which two letters were circled?',log=False)
else: respPromptStim.setText('Error: unexpected task',log=False)
postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
return letterSeqStream1,letterSeqStream2,cuesPos,correctAnsStream1,correctAnsStream2, ts
def handleAndScoreResponse(passThisTrial,responses,responsesAutopilot,task,targetLeftRightIfOne,firstRespLRifTwo, numStreams,
letterSeqStream1,letterSeqStream2,cuesPos,correctAnsStream1,correctAnsStream2):
#Handle response, calculate whether correct, ########################################
if autopilot or passThisTrial:
responses = responsesAutopilot
if autopilot: print("autopilot and fake responses are:",responses)
correctAnswers = correctAnsStream1
if numStreams>1:
print('correctAnsStream1=',correctAnsStream1,'correctAnsStream2=',correctAnsStream2,'task=',task,'targetLeftRightIfOne=',targetLeftRightIfOne)
if task=='T1':
if targetLeftRightIfOne == 'left':
correctAnswers = np.array([correctAnsStream1])
elif targetLeftRightIfOne == 'right':
correctAnswers = np.array([correctAnsStream2])
elif task=='T1T2':
        if correctAnsStream1.size == 1: #0-d arrays cannot be concatenated, so build the array explicitly
correctAnswers = np.array([correctAnsStream1, correctAnsStream2])
else:
correctAnswers = np.concatenate((correctAnsStream1, correctAnsStream2))
eachCorrect = np.zeros( len(np.atleast_1d(correctAnsStream1)) + (numStreams-1)*len(np.atleast_1d(correctAnsStream2)) )
eachApproxCorrect = np.zeros( len(np.atleast_1d(correctAnsStream1)) + (numStreams-1)*len(np.atleast_1d(correctAnsStream2)) )
posOfResponse = np.zeros( len(cuesPos) )
responsePosRelative = np.zeros( len(cuesPos) )
for cueI in range(len(cuesPos)): #score response to each cue
thisLetterSeq = letterSeqStream1
if task=='T1':
if targetLeftRightIfOne=='right':
thisLetterSeq = letterSeqStream2
if numStreams>1:
if cueI>0:
thisLetterSeq = letterSeqStream2
if correctAnswers[cueI] == letterToNumber( responses[cueI] ):
eachCorrect[cueI] = 1
posThisResponse= np.where( letterToNumber(responses[cueI])==thisLetterSeq )
print('for cue ',cueI,' responses=',responses,'correctAnswers=',correctAnswers,'posThisResponse=',posThisResponse,' letterSeqStream1=',letterSeqStream1,' letterSeqStream2=',letterSeqStream2) #debugOFF
posThisResponse= posThisResponse[0] #list with potentially two entries, want first which will be array of places where the response was found in the letter sequence
if len(posThisResponse) > 1:
logging.error('Expected response to have occurred in only one position in stream')
        if len(posThisResponse)==0: #response not found in letter sequence (len, rather than the deprecated np.alen)
posThisResponse = -999
logging.warn('Response was not present in the stimulus stream')
else:
posThisResponse = posThisResponse[0]
posOfResponse[cueI]= posThisResponse
responsePosRelative[cueI] = posOfResponse[cueI] - cuesPos[cueI]
        eachApproxCorrect[cueI] += abs(responsePosRelative[cueI]) <= 3 #Vul efficacy measure of getting it right to within plus/minus 3 serial positions
if numStreams>1:
print("correctAnsStream1=",correctAnsStream1, " or ", numberToLetter(correctAnsStream1), " correctAnsStream2=",correctAnsStream2, " or ", numberToLetter(correctAnsStream2), 'correctAnswers=',correctAnswers ) #debugON
print("eachCorrect=",eachCorrect) #debugOFF
for cueI in range(len(cuesPos)): #print response stuff to dataFile
#header was answerPos0, answer0, response0, correct0, responsePosRelative0
print(cuesPos[cueI],'\t', end='', file=dataFile) #answerPos
if numStreams>1:
if cueI>0:
thisLetterSeq = letterSeqStream2
else:
thisLetterSeq = letterSeqStream1
answerCharacter = numberToLetter( correctAnswers[cueI] )
print(answerCharacter, '\t', end='', file=dataFile) #answer0
print(responses[cueI], '\t', end='', file=dataFile) #response0
print(eachCorrect[cueI] , '\t', end='',file=dataFile) #correct0
print(responsePosRelative[cueI], '\t', end='',file=dataFile) #responsePosRelative0
if send_triggers:
if responsePosRelative[cueI]==responsePosRelative0:
if targetLeftRightIfOne=='left':
#p_middleOfRangeForSerialPositionError for serial position error of 0 is 128.
code = p_middleOfRangeForSerialPositionError + responsePosRelative[cueI]
send_trigger_to_port(code)
#p_firstTargetIncorrect = 232 code for incorrect for the first target is 232
#code for correct for first target is 233
#code for incorrect for second target is 234
#code for correct for second target is 235
incorrectThisCue = p_firstTargetIncorrect + cueI*2 #when cueI=0, then 232. When cueI=1, 234.
send_trigger_to_port( incorrectThisCue + eachCorrect[cueI])
if responsePosRelative0[0]:
send_trigger_to_port(p_resp0Lcorrect)
if responsePosRelative0[-1]:
send_trigger_to_port(p_resp0Lapprox_1)
if responsePosRelative0[-2]:
send_trigger_to_port(p_resp0Lapprox_2)
if responsePosRelative0[1]:
send_trigger_to_port(p_resp0Lapprox1)
if responsePosRelative0[2]:
send_trigger_to_port(p_resp0Lapprox2)
else:
send_trigger_to_port(p_resp0Lincorrect)
if targetLeftRightIfOne=='right':
if responsePosRelative0[0]:
send_trigger_to_port(p_resp0Rcorrect)
if responsePosRelative0[-1]:
send_trigger_to_port(p_resp0Rapprox_1)
if responsePosRelative0[-2]:
send_trigger_to_port(p_resp0Rapprox_2)
if responsePosRelative0[1]:
send_trigger_to_port(p_resp0Rapprox1)
if responsePosRelative0[2]:
send_trigger_to_port(p_resp0Rapprox2)
else:
send_trigger_to_port(p_resp0Rincorrect)
if responsePosRelative[cueI]==responsePosRelative1:
if firstRespLRifTwo=='left':
if responsePosRelative1[0]:
send_trigger_to_port(p_resp1Rcorrect)
if responsePosRelative1[-1]:
send_trigger_to_port(p_resp1Rapprox_1)
if responsePosRelative1[-2]:
send_trigger_to_port(p_resp1Rapprox_2)
if responsePosRelative1[1]:
send_trigger_to_port(p_resp1Rapprox1)
if responsePosRelative1[2]:
send_trigger_to_port(p_resp1Rapprox2)
else:
send_trigger_to_port(p_resp1Rincorrect)
if firstRespLRifTwo=='right':
if responsePosRelative1[0]:
send_trigger_to_port(p_resp1Lcorrect)
if responsePosRelative1[-1]:
send_trigger_to_port(p_resp1Lapprox_1)
if responsePosRelative1[-2]:
send_trigger_to_port(p_resp1Lapprox_2)
if responsePosRelative1[1]:
send_trigger_to_port(p_resp1Lapprox1)
if responsePosRelative1[2]:
send_trigger_to_port(p_resp1Lapprox2)
else:
send_trigger_to_port(p_resp1Lincorrect)
print('for cueI=',cueI,' cuesPos[cueI]=',cuesPos[cueI], ' answerCharacter=',answerCharacter, ' responses[cueI]=',responses[cueI], ' eachCorrect[cueI]=',eachCorrect[cueI],' responsePosRelative[cueI]= ',responsePosRelative[cueI])
if task=='T1T2':
correct = eachCorrect.all()
elif task=='T1':
correct = eachCorrect[0]
T1approxCorrect = eachApproxCorrect[0]
if len(cuesPos)<2: #Because this experiment is assumed to also include trials with two targets, print blank values if there was only one target.
print('NaN', '\t', end='', file=dataFile) #answerPos1
print('NaN', '\t', end='', file=dataFile) #answer1
print('NaN', '\t', end='', file=dataFile) #response1
print('NaN' , '\t', end='',file=dataFile) #correct1
return correct,eachCorrect,eachApproxCorrect,T1approxCorrect,passThisTrial,expStop
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
low = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
highA.setVolume(0.9)
low.setVolume(1.0)
if correct:
highA.play()
elif passThisTrial:
high= sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
for i in range(2):
high.play(); low.play();
else: #incorrect
highA.play() #low.play()
myMouse = event.Mouse()
expStop=False; framesSaved=0
nDone = -1 #change to zero once start main part of experiment
if doStaircase:
#create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal = 95,
startValSd = 80,
stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials = staircaseTrials,
#extraInfo = thisInfo,
pThreshold = threshCriterion, #0.25,
gamma = 1./26,
delta=0.02, #lapse rate, I suppose for Weibull function fit
method = 'quantile', #uses the median of the posterior as the final answer
stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal = 100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2,.2,.1,.1,.05,.05]
stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
staircase = data.StairHandler(startVal = 0.1,
stepType = 'log', #if log, what do I want to multiply it by
stepSizes = stepSizesLog, #step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, #will home in on the 80% threshold
nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+' trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg); logging.info(phasesMsg)
#staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() #only needed for easyStaircaseStarterNoise
staircaseTrialN = -1; mainStaircaseGoing = False
while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
mainStaircaseGoing = True
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
try: #advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
staircaseTrialN += 1
except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break #break out of the trials loop
#print('staircaseTrialN=',staircaseTrialN)
letterSeqStream1,letterSeqStream2, cuesPos,correctAnsStream1,correctAnsStream2, ts = do_RSVP_stim(numStreams,thisTrial['task'],thisTrial['targetLeftRightIfOne'],cue1pos, cue2lag, noisePercent/100.,staircaseTrialN)
numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
if thisTrial['task']=='T1':
numRespsWanted = 1
elif thisTrial['task']=='T1T2':
numRespsWanted = 2
responseDebug=False; responses = list(); responsesAutopilot = list(); #collect responses
expStop,passThisTrial,responses,responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
if not expStop:
if mainStaircaseGoing:
print('staircase\t', end='', file=dataFile)
else:
print('staircase_preface\t', end='', file=dataFile)
#header start 'trialnum\tsubject\ttask\t'
print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
print(subject,'\t',thisTrial['task'],'\t', round(noisePercent,2),'\t', end='', file=dataFile)
correct,eachCorrect,eachApproxCorrect,T1approxCorrect,passThisTrial,expStop = (
    handleAndScoreResponse(passThisTrial,responses,responsesAutopilot,thisTrial['task'],thisTrial['targetLeftRightIfOne'],thisTrial['firstRespLRifTwo'],numStreams,letterSeqStream1,letterSeqStream2,cuesPos,correctAnsStream1,correctAnsStream2) )
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
core.wait(.06)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
corrEachTrial.append(T1approxCorrect)
if mainStaircaseGoing:
staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
#print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
#ENDING STAIRCASE PHASE #################################################################
## ## ## #########################################################
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN+1], np.array(corrEachTrial))
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase ' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
logging.info(msg); print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
#print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(),3)
if descendingPsycho:
threshNoise = 100- threshNoise
threshNoise = max( 0, threshNoise ) #e.g. if all trials are wrong, the posterior peaks at a very negative number
msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
logging.info(msg); print(msg)
myWin.close()
#Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
except:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
#save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() #must call this to actually show plot
else: #not staircase
noisePercent = defaultNoiseLevel
phasesMsg = 'Experiment will have '+str(trialsAB.nTotal if doAB else 0)+ ' AB and ' + str(trialsDualStream.nTotal) + ' dualstream trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
print(phasesMsg); logging.info(phasesMsg)
ABfirst = False
nDone =0
totalTrials = 0
if ABfirst and trialsAB != None:
msg='Starting AB part of experiment'
trials = trialsAB
totalTrials += trialsAB.nTotal
logging.info(msg); print(msg)
else:
msg = "Starting dual stream part of experiment"
trials = trialsDualStream
logging.info(msg); print(msg)
totalTrials += trialsDualStream.nTotal
while nDone < totalTrials and expStop==False:
print("nDone = ", nDone, " out of ", totalTrials, " trials.nRemaining = ", trials.nRemaining)
#Control which block we are in, AB or dualStream
if trials.nRemaining == 0: # trialsAB.nTotal:
if trials == trialsAB: #check if reached end of the AB part
trials = trialsDualStream
msg = "Starting dual stream part of experiment"
logging.info(msg); print(msg)
elif trials == trialsDualStream: #check if reached end of dual stream part
trials = trialsAB
msg='Starting AB part of experiment'
logging.info(msg); print(msg)
#end control of which block we are in
thisTrial = trials.next() #get a proper (non-staircase) trial
cue1pos = thisTrial['cue1pos']
cue2lag = None
if thisTrial['task']=="T1T2":
cue2lag = thisTrial['cue2lag']
numStreams = thisTrial['numStreams']
letterSeqStream1,letterSeqStream2,cuesPos,correctAnsStream1,correctAnsStream2,ts = do_RSVP_stim(numStreams,thisTrial['task'],thisTrial['targetLeftRightIfOne'],cue1pos, cue2lag, noisePercent/100.,nDone)
numCasesInterframeLong = timingCheckAndLog(ts,nDone)
if thisTrial['task']=='T1':
numRespsWanted = 1
elif thisTrial['task']=='T1T2':
numRespsWanted = 2
responseDebug=False; responses = list(); responsesAutopilot = list(); #collect responses
print("autopilot=",autopilot)
lineupResponse = True
if lineupResponse:
bothSides = True
if thisTrial['task']=='T1':
bothSides = False
sideFirst = thisTrial['targetLeftRightIfOne']
else:
sideFirst = thisTrial['firstRespLRifTwo']
alphabet = list(string.ascii_uppercase)
possibleResps = alphabet
#possibleResps.remove('C'); possibleResps.remove('V')
expStop,passThisTrial,responses,responsesAutopilot = \
letterLineupResponse.doLineup(myWin,myMouse,clickSound,badKeySound,possibleResps,bothSides,sideFirst,autopilot)
else:
expStop,passThisTrial,responses,responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
print('expStop=',expStop,' passThisTrial=',passThisTrial,' responses=',responses, ' responsesAutopilot =', responsesAutopilot)
if not expStop:
print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile
print(nDone,'\t', end='', file=dataFile)
print(subject,'\t',thisTrial['task'],'\t', round(noisePercent,3),'\t', thisTrial['targetLeftRightIfOne'],'\t', end='', file=dataFile)
correct,eachCorrect,eachApproxCorrect,T1approxCorrect,passThisTrial,expStop = (
handleAndScoreResponse(passThisTrial,responses,responsesAutopilot,thisTrial['task'],thisTrial['targetLeftRightIfOne'],thisTrial['firstRespLRifTwo'],numStreams,letterSeqStream1,letterSeqStream2,cuesPos,correctAnsStream1,correctAnsStream2) )
print('Scored response. correct=', correct) #debug
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
#Send a message to EEG indicating whether correct on each side. eachCorrect[0] is whether they got exactly correct the left side, eachCorrect[1] is right side
#if send_triggers:
#numRespsWanted indicates how many streams were cued (for standard dual-stream task)
#streamsCued = bothSides+1
#streamsCuedPartOfMessage = str(streamsCued)
#if streamsCuedPartOfMessage
#leftOrRightIfOneStreamPartOfMessage =
#send_trigger_to_port(p_stimulivalue)
#Send a message to EEG indicating whether approx correct on each side
numTrialsCorrect += correct #so count -1 as 0
numTrialsApproxCorrect += eachApproxCorrect.all()
if thisTrial['task']=="T1T2":
numTrialsEachCorrect += eachCorrect
numTrialsEachApproxCorrect += eachApproxCorrect
if numStreams==1:
cue2lagIdx = list(possibleCue2lags).index(cue2lag)
nTrialsCorrectT2eachLag[cue2lagIdx] += eachCorrect[1]
nTrialsApproxCorrectT2eachLag[cue2lagIdx] += eachApproxCorrect[1]
nTrialsEachLag[cue2lagIdx] += 1
if exportImages: #catches one frame of response
myWin.getMovieFrame() #I can't explain why another getMovieFrame, and core.wait is needed
framesSaved +=1; core.wait(.1)
myWin.saveMovieFrames('exported/frames.mov')
expStop=True
core.wait(.1)
if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
nDone+=1
dataFile.flush(); logging.flush()
print('nDone=', nDone,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
if (trials.nTotal > 6 and nDone > 2 and nDone %
( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
nextText.setText('Press "SPACE" to continue!')
nextText.draw()
progressMsg = 'Completed ' + str(trials.thisN) + ' of ' + str(trials.nTotal) + ' trials' #EVA if this doesn't work, change it to progressMsg = ' '
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip() # myWin.flip(clearBuffer=True)
waiting=True
while waiting:
if autopilot: break
elif expStop == True:break
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space','ESCAPE']:
waiting=False
if key in ['ESCAPE']:
    expStop = True
myWin.clearBuffer()
core.wait(.2); time.sleep(.2)
#end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
logging.flush()
if expStop:
msg = 'user aborted experiment on keypress with trials done=' + str(nDone) + ' of ' + str(trials.nTotal+1)
print(msg); logging.error(msg)
if (nDone >0):
print('Of ',nDone,' trials, on ',numTrialsCorrect*1.0/nDone*100., '% of all trials all targets reported exactly correct',sep='')
print('All targets approximately correct in ',round(numTrialsApproxCorrect*1.0/nDone*100,1),'% of trials',sep='')
if doAB:
print('T1: ',round(numTrialsEachCorrect[0]*1.0/nDone*100.,2), '% correct',sep='')
if len(numTrialsEachCorrect) >1:
print('T2: ',round(numTrialsEachCorrect[1]*1.0/nDone*100,2),'% correct',sep='')
print('T1: ',round(numTrialsEachApproxCorrect[0]*1.0/nDone*100,2),'% approximately correct',sep='')
if len(numTrialsEachCorrect) >1:
print('T2: ',round(numTrialsEachApproxCorrect[1]*1.0/nDone*100,2),'% approximately correct',sep='')
print('T2 for each of the lags,',np.around(possibleCue2lags,0),': ', np.around(100*nTrialsCorrectT2eachLag / nTrialsEachLag,3), '%correct, and ',
np.around(100*nTrialsApproxCorrectT2eachLag/nTrialsEachLag,3),'%approximately correct')
dataFile.close()
myWin.close() #have to close window if want to show a plot
#ADD PLOT OF AB PERFORMANCE?
|
#!/usr/bin/env python3
import sys
# Functions
def read_numbers():
try:
n = int(sys.stdin.readline())
k = int(sys.stdin.readline())
v = [int(sys.stdin.readline()) for _ in range(n)]
except ValueError:
return 0, 0, None
return n, k, v
def compute_unfairness(n, k, v):
v.sort() # Order from smallest to largest
u = v[n - 1] - v[0] # Initial unfairness
for i in range(0, n - k + 1):
d = v[i + k - 1] - v[i] # Difference between max and min in sequence
u = min(u, d) # Take smallest unfairness
return u
# Main execution
def main():
n, k, v = read_numbers()
while n:
print(compute_unfairness(n, k, v))
n, k, v = read_numbers()
if __name__ == '__main__':
main()
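# Illustrative behaviour of compute_unfairness (the values are hypothetical): the
# list is sorted and a window of size k slides over it, returning the smallest
# max-min spread among all windows.
#
#   compute_unfairness(7, 3, [10, 100, 300, 200, 1000, 20, 30])  # -> 20 (window 10, 20, 30)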
|
from sparseml.keras.optim import ScheduledModifierManager
from tensorflow.keras.utils import to_categorical
from autogoal.contrib.keras import KerasClassifier
from autogoal.grammar import CategoricalValue
def build_sparseml_keras_classifier(path_to_recipe: str):
"""Build custom KerasClassifier algorithm applaying sparcification techniques provided in path_to_recipe yaml file."""
class SparseMLKerasClassifier(KerasClassifier):
def __init__(
self,
optimizer: CategoricalValue("sgd", "adam", "rmsprop"),
grammar=None,
**kwargs
) -> None:
self._path = path_to_recipe
self._manager = ScheduledModifierManager.from_yaml(self._path)
super().__init__(
grammar=grammar or self._build_grammar(), optimizer=optimizer, **kwargs
)
def fit(self, X, y):
self._classes = {k: v for k, v in zip(set(y), range(len(y)))}
self._inverse_classes = {v: k for k, v in self._classes.items()}
y = [self._classes[yi] for yi in y]
y = to_categorical(y)
# Create Model
if self._graph is None:
raise TypeError(
"You must call `sample` to generate the internal model."
)
self._build_nn(self._graph, X, y)
# Sparsify Model
old_model = self._model
self._model = self._manager.finalize(old_model)
self._model.compile(**self._compile_kwargs)
# Fit Model
self._fit_model(X, y)
return SparseMLKerasClassifier
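# Minimal usage sketch (hedged): the recipe path, optimizer choice, and data names
# below are hypothetical, and the graph-sampling step is elided because its exact
# call depends on autogoal's sampler API (fit() raises a TypeError until it is done).
#
#   SparseClf = build_sparseml_keras_classifier("recipe.yaml")
#   clf = SparseClf(optimizer="adam")
#   # ... sample the internal graph first (see the TypeError raised in fit), then:
#   clf.fit(X_train, y_train)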
|
from collections import defaultdict
PAD = u'PAD'
UNK = u'UNKNOWN'
MARK = u'MARK'
NMARK = u'NMARK'
NA = u'NA'
GA = u'Ga'
O = u'O'
NI = u'Ni'
PRD = u'V'
GA_INDEX = 0
O_INDEX = 1
NI_INDEX = 2
class Vocab(object):
def __init__(self):
self.i2w = []
self.w2i = {}
def add_word(self, word):
if word not in self.w2i:
new_id = self.size()
self.i2w.append(word)
self.w2i[word] = new_id
def has_key(self, word):
return word in self.w2i
def get_id(self, word):
return self.w2i.get(word)
def get_word(self, w_id):
return self.i2w[w_id]
def set_init_word(self):
self.add_word(PAD)
def set_pas_labels(self):
self.add_word(NA)
self.add_word(GA)
self.add_word(O)
self.add_word(NI)
self.add_word(PRD)
def set_pas_labels_char(self):
bio = ['B-', 'I-']
self.add_word(NA)
for i in bio:
self.add_word(i + GA)
for i in bio:
self.add_word(i + O)
for i in bio:
self.add_word(i + NI)
for i in bio:
self.add_word(i + PRD)
def add_vocab(self, word_freqs, vocab_cut_off=0):
for w, freq in sorted(word_freqs.items(), key=lambda kv: -kv[1]):
if freq <= vocab_cut_off:
break
self.add_word(w)
def add_vocab_from_corpus(self, corpus, min_unit='word', vocab_cut_off=0):
word_freqs = self.get_word_freqs(corpus, min_unit)
self.add_vocab(word_freqs, vocab_cut_off)
def add_vocab_from_lists(self, corpus, vocab_cut_off=0):
word_freqs = self.get_word_freqs_in_lists(corpus)
self.add_vocab(word_freqs, vocab_cut_off)
@staticmethod
def get_word_freqs(corpus, min_unit='word'):
word_freqs = defaultdict(int)
for doc in corpus:
for sent in doc:
for w in sent:
if min_unit == 'word':
word_freqs[w.form] += 1
else:
for c in w.chars:
word_freqs[c] += 1
return word_freqs
@staticmethod
def get_word_freqs_in_lists(corpus):
word_freqs = defaultdict(int)
for n_best_list in corpus:
for w in n_best_list.words:
word_freqs[w.form] += 1
return word_freqs
def size(self):
return len(self.i2w)
def save(self, path):
with open(path, 'w', encoding='utf-8') as f:
    for i, w in enumerate(self.i2w):
        f.write('{}\t{}\n'.format(i, w))
@classmethod
def load(cls, path):
vocab = Vocab()
with open(path, encoding='utf-8') as f:
    for line in f:
        w = line.strip().split('\t')[1]
        vocab.add_word(w)
return vocab
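# Small usage sketch (hypothetical token 'kare') exercising the Vocab API defined
# above; guarded so it only runs when this module is executed directly.
if __name__ == '__main__':
    vocab = Vocab()
    vocab.set_init_word()        # id 0 -> PAD
    vocab.set_pas_labels()       # adds NA, Ga, O, Ni, V (ids 1..5)
    vocab.add_word(u'kare')      # hypothetical token gets the next id
    assert vocab.get_id(u'kare') == 6
    assert vocab.get_word(0) == PAD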
|
# coding: utf-8
"""
Payoneer Mobile API
Swagger specification for https://mobileapi.payoneer.com
OpenAPI spec version: 0.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import payoneer_mobile_api
from payoneer_mobile_api.rest import ApiException
from payoneer_mobile_api.models.logout_response import LogoutResponse
class TestLogoutResponse(unittest.TestCase):
""" LogoutResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testLogoutResponse(self):
"""
Test LogoutResponse
"""
model = payoneer_mobile_api.models.logout_response.LogoutResponse()
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#
# csgo documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 15 03:43:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from csgo import __version__, __author__
project = u'csgo'
copyright = u'2016, %s' % __author__
author = __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'csgo v'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'csgodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'csgo.tex', u'csgo Documentation',
version, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'csgo', u'csgo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'csgo', u'csgo Documentation',
author, 'csgo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# LINK EXTERNAL DOCS
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'gevent': ('http://www.gevent.org', None),
'requests': ('https://2.python-requests.org/en/master/', None),
'steam': ('https://steam.readthedocs.io/en/stable/', None),
}
# AUTODOC
autodoc_member_order = 'bysource'
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import IQCashTestFramework
class CreateCache(IQCashTestFramework):
# Test network and test nodes are not required:
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
# Initialize PoS chain (it will automatically generate PoW chain too)
self._initialize_chain(toPosPhase=True)
def set_test_params(self):
self.num_nodes = 0
self.supports_cli = True
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
|
# link: https://leetcode.com/problems/partition-labels/
class Solution(object):
def partitionLabels(self, S):
"""
:type S: str
:rtype: List[int]
"""
start = 0
length = 0
dic = {c:i for i,c in enumerate(S)}
result = []
for i in range(len(S)):
start = max(start,dic[S[i]])
if i==start:
result.append(i-length+1)
length = i+1
return result
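# Worked example (standard LeetCode case, a quick check rather than part of the
# submission): the last occurrence of each letter bounds the partitions.
if __name__ == '__main__':
    assert Solution().partitionLabels("ababcbacadefegdehijhklij") == [9, 7, 8]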
|
# SPDX-License-Identifier: Apache-2.0
# To register a converter for scikit-learn operators,
# import associated modules here.
from . import ada_boost
from . import array_feature_extractor
from . import bagging
from . import binariser
from . import calibrated_classifier_cv
from . import cast_op
from . import concat_op
from . import cross_decomposition
from . import decision_tree
from . import decomposition
from . import dict_vectoriser
from . import feature_selection
from . import flatten_op
from . import function_transformer
from . import gaussian_process
from . import gaussian_mixture
from . import gradient_boosting
from . import grid_search_cv
from . import id_op
from . import imputer_op
from . import isolation_forest
from . import k_bins_discretiser
from . import k_means
from . import label_binariser
from . import label_encoder
from . import linear_classifier
from . import linear_regressor
from . import multilayer_perceptron
from . import multiply_op
from . import naive_bayes
from . import nearest_neighbours
from . import normaliser
from . import one_hot_encoder
from . import one_vs_rest_classifier
from . import ordinal_encoder
from . import polynomial_features
from . import power_transformer
from . import random_forest
from . import random_projection
from . import ransac_regressor
from . import scaler_op
from . import sgd_classifier
from . import stacking
from . import support_vector_machines
from . import text_vectoriser
from . import tfidf_transformer
from . import tfidf_vectoriser
from . import voting_classifier
from . import voting_regressor
from . import zip_map
__all__ = [
    "ada_boost",
    "array_feature_extractor",
    "bagging",
    "binariser",
    "calibrated_classifier_cv",
    "cast_op",
    "concat_op",
    "cross_decomposition",
    "decision_tree",
    "decomposition",
    "dict_vectoriser",
    "feature_selection",
    "flatten_op",
    "function_transformer",
    "gaussian_process",
    "gaussian_mixture",
    "gradient_boosting",
    "grid_search_cv",
    "id_op",
    "imputer_op",
    "isolation_forest",
    "k_bins_discretiser",
    "k_means",
    "label_binariser",
    "label_encoder",
    "linear_classifier",
    "linear_regressor",
    "multilayer_perceptron",
    "multiply_op",
    "naive_bayes",
    "nearest_neighbours",
    "normaliser",
    "one_hot_encoder",
    "one_vs_rest_classifier",
    "ordinal_encoder",
    "polynomial_features",
    "power_transformer",
    "random_forest",
    "random_projection",
    "ransac_regressor",
    "scaler_op",
    "sgd_classifier",
    "stacking",
    "support_vector_machines",
    "text_vectoriser",
    "tfidf_transformer",
    "tfidf_vectoriser",
    "voting_classifier",
    "voting_regressor",
    "zip_map",
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implementation of the Apriori algorithm.
"""
# We use semantic versioning
# See https://semver.org/
__version__ = "1.1.1"
import sys
from efficient_apriori.apriori import apriori
from efficient_apriori.itemsets import itemsets_from_transactions
from efficient_apriori.rules import Rule, generate_rules_apriori
def run_tests():
"""
Run all tests.
"""
import pytest
import os
base, _ = os.path.split(__file__)
pytest.main(args=[base, "--doctest-modules"])
#if (sys.version_info[0] < 3) or (sys.version_info[1] < 6):
# msg = "The `efficient_apriori` package only works for Python 3.6+."
# raise Exception(msg)
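# Minimal usage sketch of the public API re-exported above (the transactions are
# illustrative); guarded so it only runs if this module is executed directly.
if __name__ == "__main__":
    transactions = [
        ("eggs", "bacon", "soup"),
        ("eggs", "bacon", "apple"),
        ("soup", "bacon", "banana"),
    ]
    itemsets, rules = apriori(transactions, min_support=0.5, min_confidence=1)
    print(itemsets)
    print(rules)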
|
#
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# @lc code=start
from typing import List
class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        # Straightforward merge-and-sort approach, O((m+n) log(m+n));
        # kept simple so the stub is runnable rather than left as `pass`.
        merged = sorted(nums1 + nums2)
        mid = len(merged) // 2
        if len(merged) % 2:
            return float(merged[mid])
        return (merged[mid - 1] + merged[mid]) / 2.0
# @lc code=end
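# Quick sanity check for the merge-based implementation above, using the inputs
# from the problem statement (not part of the LeetCode submission itself).
if __name__ == '__main__':
    assert Solution().findMedianSortedArrays([1, 3], [2]) == 2.0
    assert Solution().findMedianSortedArrays([1, 2], [3, 4]) == 2.5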
|
# Automatically generated from poetry/pyproject.toml
# flake8: noqa
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['c7n',
'c7n.actions',
'c7n.filters',
'c7n.reports',
'c7n.resources',
'c7n.ufuncs']
package_data = \
{'': ['*']}
install_requires = \
['argcomplete>=1.11.1,<2.0.0',
'boto3>=1.12.31,<2.0.0',
'importlib-metadata>1.7.0',
'jsonpickle==1.3',
'jsonschema>=3.2.0,<4.0.0',
'python-dateutil>=2.8.1,<3.0.0',
'pyyaml>=5.3,<6.0',
'tabulate>=0.8.6,<0.9.0']
entry_points = \
{'console_scripts': ['custodian = c7n.cli:main']}
setup_kwargs = {
'name': 'c7n',
'version': '0.9.10',
'description': 'Cloud Custodian - Policy Rules Engine',
'long_description': 'Cloud Custodian\n=================\n\n<p align="center"><img src="https://cloudcustodian.io/img/logo_capone_devex_cloud_custodian.svg" alt="Cloud Custodian Logo" width="200px" height="200px" /></p>\n\n---\n\n[](https://gitter.im/cloud-custodian/cloud-custodian?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n[](https://github.com/cloud-custodian/cloud-custodian/actions?query=workflow%3ACI+branch%3Amaster+event%3Apush)\n[](https://dev.azure.com/cloud-custodian/cloud-custodian/_build)\n[](https://www.apache.org/licenses/LICENSE-2.0)\n[](https://codecov.io/gh/cloud-custodian/cloud-custodian)\n[](https://requires.io/github/cloud-custodian/cloud-custodian/requirements/?branch=master)\n[](https://bestpractices.coreinfrastructure.org/projects/3402)\n\nCloud Custodian is a rules engine for managing public cloud accounts and\nresources. It allows users to define policies to enable a well managed\ncloud infrastructure, that\\\'s both secure and cost optimized. It\nconsolidates many of the adhoc scripts organizations have into a\nlightweight and flexible tool, with unified metrics and reporting.\n\nCustodian can be used to manage AWS, Azure, and GCP environments by\nensuring real time compliance to security policies (like encryption and\naccess requirements), tag policies, and cost management via garbage\ncollection of unused resources and off-hours resource management.\n\nCustodian policies are written in simple YAML configuration files that\nenable users to specify policies on a resource type (EC2, ASG, Redshift,\nCosmosDB, PubSub Topic) and are constructed from a vocabulary of filters\nand actions.\n\nIt integrates with the cloud native serverless capabilities of each\nprovider to provide for real time enforcement of policies with builtin\nprovisioning. Or it can be run as a simple cron job on a server to\nexecute against large existing fleets.\n\nCloud Custodian is a CNCF Sandbox project, lead by a community of hundreds\nof contributors.\n\nFeatures\n--------\n\n- Comprehensive support for public cloud services and resources with a\n rich library of actions and filters to build policies with.\n- Supports arbitrary filtering on resources with nested boolean\n conditions.\n- Dry run any policy to see what it would do.\n- Automatically provisions serverless functions and event sources (\n AWS CloudWatchEvents, AWS Config Rules, Azure EventGrid, GCP\n AuditLog & Pub/Sub, etc)\n- Cloud provider native metrics outputs on resources that matched a\n policy\n- Structured outputs into cloud native object storage of which\n resources matched a policy.\n- Intelligent cache usage to minimize api calls.\n- Supports multi-account/subscription/project usage.\n- Battle-tested - in production on some very large cloud environments.\n\nLinks\n-----\n\n- [Homepage](http://cloudcustodian.io)\n- [Docs](http://cloudcustodian.io/docs/index.html)\n- [Developer Install](https://cloudcustodian.io/docs/developer/installing.html)\n- [Presentations](https://www.google.com/search?q=cloud+custodian&source=lnms&tbm=vid)\n\nQuick Install\n-------------\n\n```shell\n$ python3 -m venv custodian\n$ source custodian/bin/activate\n(custodian) $ pip install c7n\n```\n\n\nUsage\n-----\n\nThe first step to using Cloud Custodian is writing a YAML file\ncontaining the policies that you want to run. 
Each policy specifies\nthe resource type that the policy will run on, a set of filters which\ncontrol resources will be affected by this policy, actions which the policy\nwith take on the matched resources, and a mode which controls which\nhow the policy will execute.\n\nThe best getting started guides are the cloud provider specific tutorials.\n\n - [AWS Getting Started](https://cloudcustodian.io/docs/aws/gettingstarted.html)\n - [Azure Getting Started](https://cloudcustodian.io/docs/azure/gettingstarted.html)\n - [GCP Getting Started](https://cloudcustodian.io/docs/gcp/gettingstarted.html)\n\nAs a quick walk through, below are some sample policies for AWS resources.\n\n 1. will enforce that no S3 buckets have cross-account access enabled.\n 1. will terminate any newly launched EC2 instance that do not have an encrypted EBS volume.\n 1. will tag any EC2 instance that does not have the follow tags\n "Environment", "AppId", and either "OwnerContact" or "DeptID" to\n be stopped in four days.\n\n```yaml\npolicies:\n - name: s3-cross-account\n description: |\n Checks S3 for buckets with cross-account access and\n removes the cross-account access.\n resource: aws.s3\n region: us-east-1\n filters:\n - type: cross-account\n actions:\n - type: remove-statements\n statement_ids: matched\n\n - name: ec2-require-non-public-and-encrypted-volumes\n resource: aws.ec2\n description: |\n Provision a lambda and cloud watch event target\n that looks at all new instances and terminates those with\n unencrypted volumes.\n mode:\n type: cloudtrail\n role: CloudCustodian-QuickStart\n events:\n - RunInstances\n filters:\n - type: ebs\n key: Encrypted\n value: false\n actions:\n - terminate\n\n - name: tag-compliance\n resource: aws.ec2\n description: |\n Schedule a resource that does not meet tag compliance policies to be stopped in four days. 
Note a separate policy using the`marked-for-op` filter is required to actually stop the instances after four days.\n filters:\n - State.Name: running\n - "tag:Environment": absent\n - "tag:AppId": absent\n - or:\n - "tag:OwnerContact": absent\n - "tag:DeptID": absent\n actions:\n - type: mark-for-op\n op: stop\n days: 4\n```\n\nYou can validate, test, and run Cloud Custodian with the example policy with these commands:\n\n```shell\n# Validate the configuration (note this happens by default on run)\n$ custodian validate policy.yml\n\n# Dryrun on the policies (no actions executed) to see what resources\n# match each policy.\n$ custodian run --dryrun -s out policy.yml\n\n# Run the policy\n$ custodian run -s out policy.yml\n```\n\nYou can run Cloud Custodian via Docker as well:\n\n```shell\n# Download the image\n$ docker pull cloudcustodian/c7n\n$ mkdir output\n\n# Run the policy\n#\n# This will run the policy using only the environment variables for authentication\n$ docker run -it \\\n -v $(pwd)/output:/home/custodian/output \\\n -v $(pwd)/policy.yml:/home/custodian/policy.yml \\\n --env-file <(env | grep "^AWS\\|^AZURE\\|^GOOGLE") \\\n cloudcustodian/c7n run -v -s /home/custodian/output /home/custodian/policy.yml\n\n# Run the policy (using AWS\'s generated credentials from STS)\n#\n# NOTE: We mount the ``.aws/credentials`` and ``.aws/config`` directories to\n# the docker container to support authentication to AWS using the same credentials\n# credentials that are available to the local user if authenticating with STS.\n\n$ docker run -it \\\n -v $(pwd)/output:/home/custodian/output \\\n -v $(pwd)/policy.yml:/home/custodian/policy.yml \\\n -v $(cd ~ && pwd)/.aws/credentials:/home/custodian/.aws/credentials \\\n -v $(cd ~ && pwd)/.aws/config:/home/custodian/.aws/config \\\n --env-file <(env | grep "^AWS") \\\n cloudcustodian/c7n run -v -s /home/custodian/output /home/custodian/policy.yml\n```\n\nThe [custodian cask\ntool](https://cloudcustodian.io/docs/tools/cask.html) is a go binary\nthat provides a transparent front end to docker that mirors the regular\ncustodian cli, but automatically takes care of mounting volumes.\n\nConsult the documentation for additional information, or reach out on gitter.\n\nCloud Provider Specific Help\n----------------------------\n\nFor specific instructions for AWS, Azure, and GCP, visit the relevant getting started page.\n\n- [AWS](https://cloudcustodian.io/docs/aws/gettingstarted.html)\n- [Azure](https://cloudcustodian.io/docs/azure/gettingstarted.html)\n- [GCP](https://cloudcustodian.io/docs/gcp/gettingstarted.html)\n\nGet Involved\n------------\n\n- [Gitter](https://gitter.im/cloud-custodian/cloud-custodian)\n- [GitHub](https://github.com/cloud-custodian/cloud-custodian)\n- [Mailing List](https://groups.google.com/forum/#!forum/cloud-custodian)\n- [Reddit](https://reddit.com/r/cloudcustodian)\n- [StackOverflow](https://stackoverflow.com/questions/tagged/cloudcustodian)\n\nAdditional Tools\n----------------\n\nThe Custodian project also develops and maintains a suite of additional\ntools here\n<https://github.com/cloud-custodian/cloud-custodian/tree/master/tools>:\n\n- [**_Org_:**](https://cloudcustodian.io/docs/tools/c7n-org.html) Multi-account policy execution.\n\n- [**_PolicyStream_:**](https://cloudcustodian.io/docs/tools/c7n-policystream.html) Git history as stream of logical policy changes.\n\n- [**_Salactus_:**](https://cloudcustodian.io/docs/tools/c7n-salactus.html) Scale out s3 scanning.\n\n- 
[**_Mailer_:**](https://cloudcustodian.io/docs/tools/c7n-mailer.html) A reference implementation of sending messages to users to notify them.\n\n- [**_Trail Creator_:**](https://cloudcustodian.io/docs/tools/c7n-trailcreator.html) Retroactive tagging of resources creators from CloudTrail\n\n- **_TrailDB_:** Cloudtrail indexing and time series generation for dashboarding.\n\n- [**_LogExporter_:**](https://cloudcustodian.io/docs/tools/c7n-logexporter.html) Cloud watch log exporting to s3\n\n- [**_Cask_:**](https://cloudcustodian.io/docs/tools/cask.html) Easy custodian exec via docker\n\n- [**_Guardian_:**](https://cloudcustodian.io/docs/tools/c7n-guardian.html) Automated multi-account Guard Duty setup\n\n- [**_Omni SSM_:**](https://cloudcustodian.io/docs/tools/omnissm.html) EC2 Systems Manager Automation\n\n- [**_Mugc_:**](https://github.com/cloud-custodian/cloud-custodian/tree/master/tools/ops#mugc) A utility used to clean up Cloud Custodian Lambda policies that are deployed in an AWS environment.\n\nContributing\n------------\n\nSee <https://cloudcustodian.io/docs/contribute.html>\n\nSecurity\n--------\n\nIf you\'ve found a security related issue, a vulnerability, or a\npotential vulnerability in Cloud Custodian please let the Cloud\n[Custodian Security Team](mailto:security@cloudcustodian.io) know with\nthe details of the vulnerability. We\'ll send a confirmation email to\nacknowledge your report, and we\'ll send an additional email when we\'ve\nidentified the issue positively or negatively.\n\nCode of Conduct\n---------------\n\nThis project adheres to the [Open Code of Conduct](https://developer.capitalone.com/resources/code-of-conduct). By\nparticipating, you are expected to honor this code.\n\n',
'long_description_content_type': 'text/markdown',
'author': 'Cloud Custodian Project',
'author_email': None,
'maintainer': None,
'maintainer_email': None,
'url': 'https://cloudcustodian.io',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
from typing import List
import pytest
from airbyte_protocol.models.airbyte_protocol import DestinationSyncMode, SyncMode
from normalization.destination_type import DestinationType
from normalization.transform_catalog.stream_processor import StreamProcessor
from normalization.transform_catalog.table_name_registry import TableNameRegistry
@pytest.fixture(scope="function", autouse=True)
def before_tests(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
unit_tests_dir = os.path.join(request.fspath.dirname, "unit_tests")
if os.path.exists(unit_tests_dir):
os.chdir(unit_tests_dir)
else:
os.chdir(request.fspath.dirname)
yield
os.chdir(request.config.invocation_dir)
@pytest.mark.parametrize(
"cursor_field, expecting_exception, expected_cursor_field",
[
(None, False, "_airbyte_emitted_at"),
(["updated_at"], False, "updated_at"),
(["_airbyte_emitted_at"], False, "_airbyte_emitted_at"),
(["parent", "nested_field"], True, "nested_field"),
],
)
def test_cursor_field(cursor_field: List[str], expecting_exception: bool, expected_cursor_field: str):
stream_processor = StreamProcessor.create(
stream_name="test_cursor_field",
destination_type=DestinationType.POSTGRES,
default_schema="default_schema",
raw_schema="raw_schema",
schema="schema_name",
source_sync_mode=SyncMode.incremental,
destination_sync_mode=DestinationSyncMode.append_dedup,
cursor_field=cursor_field,
primary_key=[],
json_column_name="json_column_name",
properties=dict(),
tables_registry=TableNameRegistry(DestinationType.POSTGRES),
from_table="",
)
try:
assert (
stream_processor.get_cursor_field(column_names={expected_cursor_field: (expected_cursor_field, "random")})
== expected_cursor_field
)
except ValueError as e:
if not expecting_exception:
raise e
@pytest.mark.parametrize(
"primary_key, column_type, expecting_exception, expected_primary_keys, expected_final_primary_key_string",
[
([["id"]], "string", False, ["id"], "{{ adapter.quote('id') }}"),
([["id"]], "number", False, ["id"], "cast({{ adapter.quote('id') }} as {{ dbt_utils.type_string() }})"),
([["first_name"], ["last_name"]], "string", False, ["first_name", "last_name"], "first_name, last_name"),
([["float_id"]], "number", False, ["float_id"], "cast(float_id as {{ dbt_utils.type_string() }})"),
([["_airbyte_emitted_at"]], "string", False, [], "cast(_airbyte_emitted_at as {{ dbt_utils.type_string() }})"),
(None, "string", True, [], ""),
([["parent", "nested_field"]], "string", True, [], ""),
],
)
def test_primary_key(
primary_key: List[List[str]],
column_type: str,
expecting_exception: bool,
expected_primary_keys: List[str],
expected_final_primary_key_string: str,
):
stream_processor = StreamProcessor.create(
stream_name="test_primary_key",
destination_type=DestinationType.POSTGRES,
raw_schema="raw_schema",
default_schema="default_schema",
schema="schema_name",
source_sync_mode=SyncMode.incremental,
destination_sync_mode=DestinationSyncMode.append_dedup,
cursor_field=[],
primary_key=primary_key,
json_column_name="json_column_name",
properties={key: {"type": column_type} for key in expected_primary_keys},
tables_registry=TableNameRegistry(DestinationType.POSTGRES),
from_table="",
)
try:
assert stream_processor.get_primary_key(column_names=stream_processor.extract_column_names()) == expected_final_primary_key_string
except ValueError as e:
if not expecting_exception:
raise e
|
water_names = [
'H2O',
'HHO',
'OHH',
'HOH',
'OH2',
'SOL',
'WAT',
'TIP',
'TIP2',
'TIP3',
'TIP4'
]
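# Hypothetical usage sketch: pick out water residues from a residue-name list by
# checking membership in the aliases defined above.
if __name__ == '__main__':
    residues = ['ALA', 'SOL', 'HOH', 'LYS']
    assert [r for r in residues if r in water_names] == ['SOL', 'HOH']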
|
# AudioCollection - manage audio collections and databases
# Bregman - python toolkit for music information retrieval
__version__ = '1.0'
__author__ = 'Michael A. Casey'
__copyright__ = "Copyright (C) 2010 Michael Casey, Dartmouth College, All Rights Reserved"
__license__ = "gpl 2.0 or higher"
__email__ = 'mcasey@dartmouth.edu'
import os
import os.path
import time
import glob
import tempfile
import shutil
import subprocess
import hashlib
import random
import error
import features
try: # OSX / Linux
BREGMAN_ROOT = os.environ['HOME']
except: # Windows
BREGMAN_ROOT = os.environ['HOMEPATH']
DATA_ROOT = BREGMAN_ROOT + os.sep + "exp"
class AudioCollection:
"""
::
A class for extracting, persisting, and searching audio features in a collection
Initialization:
AudioCollection("/path/to/collection")
Instantiate a new collection at the given path. This directory is where features and
audioDB databases will be stored. The collection path does not have to be the same as the
audio files path for inserted audio. Audio can come from any location, but features are consolidated into
the AudioCollection path.
"""
collection_stem = "collection_"
def __init__(self, path=None, root_path=DATA_ROOT):
self.root_path = root_path
self.collection_path = path
self.adb_path = None
self.adb = None
self.rTupleList = None
self.feature_params = features.Features.default_feature_params()
self.audio_collection = set()
self.cache_temporary_files = False
self.uid="%016X"%long(random.random()*100000000000000000L) # unique instance ID
self.old_uid=self.uid
self.adb_data_size=256
self.adb_ntracks=20000
def insert_audio_files(self, audio_list):
"""
::
Maintain a set of audio files as a collection. Each item is unique in the set. Collisions are ignored.
"""
for item in audio_list: self.audio_collection.add(item)
def _gen_adb_hash(self):
"""
::
Generate a hash key based on self.feature_params to make an audioDB instance unique to feature set.
"""
m = hashlib.md5()
k = self.feature_params.keys()
k.sort() # ensure vals ordered by key lexicographical order
vals = [self.feature_params.get(i) for i in k]
m.update(vals.__repr__())
return m.hexdigest()
def _insert_features_into_audioDB(self):
"""
::
Persist features, powers, and audio-file keys in an audioDB instance.
Inserts self.rTupleList into audioDB instance associated with current feature set.
If features already exist, warn once, but continue.
Given an adb-instance path, execute the audioDB command to:
Make lists of features, powers, and database keys
Batch insert features, powers, linked to database keys
Returns full path to audioDB instance if OK or None if not OK
"""
self._new_adb() # test to see if we require a new audiodb instance for features
pth, sep, nm = self.adb_path.rpartition(os.sep)
# List names for audioDB insertion
fListName = pth + os.sep + self.uid + "_fList.txt"
pListName = pth + os.sep + self.uid + "_pList.txt"
kListName = pth + os.sep + self.uid + "_kList.txt"
# unpack the names of files we want to insert
fList, pList, kList = zip(*self.rTupleList)
# write fList, pList, kList to text files
self._write_list_to_file(fList, fListName)
self._write_list_to_file(pList, pListName)
self._write_list_to_file(kList, kListName)
# do BATCHINSERT
command = ["audioDB", "--BATCHINSERT", "-d", self.adb_path, "-F", fListName, "-W", pListName, "-K", kListName]
self._do_subprocess(command)
return 1
def _write_list_to_file(self,lst, pth):
"""
::
Utility routine to write a list of strings to a text file
"""
try:
f = open(pth,"w")
except:
print("Error opening: ", pth)
raise error.BregmanError()
for s in lst: f.write(s+"\n")
f.close()
def extract_features(self, key=None, keyrepl=None, extract_only=False, wave_suffix=".wav"):
"""
::
Extract features over the collection
Pre-requisites:
self.audio_collection - set of audio files to extract
self.feature_params - features to extract
Arguments:
key - optional string to append on filename stem as database key
keyrepl - if key is specified then keyrepl is a pattern to replace with key
extract_only - set to True to skip audioDB insertion
wave_suffix - fileName extension for key replacement
Returns rTupleList of features,powers,keys or None if fail.
"""
aList, fList, pList, kList = self._get_extract_lists(key, keyrepl, wave_suffix)
self._fftextract_list(zip(aList,fList,pList,kList))
self.rTupleList = zip(fList,pList,kList) # what will be inserted into audioDB
if not extract_only:
self._insert_features_into_audioDB() # possibly generate new audiodb instance
self.audio_collection.clear() # clear the audio_collection queue
return self.rTupleList
def _get_extract_lists(self, key=None, keyrepl=None, wave_suffix=".wav"):
"""
::
Map from self.audio_collection to aList, fList, pList, kList
"""
# The audio queue
aList = list(self.audio_collection)
aList.sort()
# The keys that will identify managed items
        if key is None:
kList = aList # use the audio file names as keys
else:
# replace keyrepl with key as database key
if not keyrepl:
print("key requires keyrepl for filename substitution")
raise error.BregmanError()
kList = [a.replace(keyrepl,key) for a in aList]
feature_suffix= self._get_feature_suffix()
power_suffix=self.feature_params['power_ext']
fList = [k.replace(wave_suffix, feature_suffix) for k in kList]
pList = [k.replace(wave_suffix, power_suffix) for k in kList]
return (aList, fList, pList, kList)
def _get_feature_suffix(self):
"""
::
Return a standardized feature suffix for extracted features
"""
return "." + self.feature_params['feature'] + "%02d"%self.feature_params['ncoef']
def _fftextract_list(self, extract_list):
command=[]
feature_keys = {'stft':'-f', 'cqft':'-q', 'mfcc':'-m', 'chroma':'-c', 'power':'-P', 'hram':'-H'}
feat = feature_keys[self.feature_params['feature']]
ncoef = "%d"%self.feature_params['ncoef']
nfft = "%d"%self.feature_params['nfft']
wfft = "%d"%self.feature_params['wfft']
nhop = "%d"%self.feature_params['nhop']
logl = "%d"%self.feature_params['log10']
mag = "%d"%self.feature_params['magnitude']
lo = "%f"%self.feature_params['lo']
hi = "%f"%self.feature_params['hi']
# lcoef = "%d"%self.feature_params['lcoef'] # not used yet
for a,f,p,k in extract_list:
if not len(glob.glob(f)):
command=["fftExtract", "-p", "bregman.wis",
"-n", nfft, "-w", wfft, "-h", nhop, feat, ncoef,
"-l", lo, "-i", hi, "-g" , logl, "-a", mag, a, f]
ret = self._do_subprocess(command)
if ret:
print("Error extacting features: ", command)
return None
else:
print("Warning: feature file already exists", f)
if not len(glob.glob(p)):
command=["fftExtract", "-p", "bregman.wis",
"-n", nfft, "-w", wfft, "-h", nhop,
"-P", "-l", lo, "-i", hi, a, p]
ret = self._do_subprocess(command)
if ret:
print("Error extacting powers: ", command)
return None
else:
print("Warning: power file already exists", p)
def _remove_temporary_files(self, key=""):
"""
::
Remove cached feature and power files based on current feature_params settings.
"""
fList = glob.glob(self.collection_path+os.sep + "*" + key + "."
+ self.feature_params['feature']+"%02d"%self.feature_params['ncoef'])
for f in fList: os.remove(f)
pList = glob.glob(self.collection_path + os.sep + "*" + key + self.feature_params["power_ext"] )
for p in pList: os.remove(p)
def initialize(self):
"""
::
Make a new collection path with an empty audioDB instance.
Each instance is unique to a set of feature_params.
Return False if an equivalent instance already exists.
Return True if new instance was created.
"""
if not self._gen_collection_path(self.collection_stem):
return 0
print("Made new directory: ", self.collection_path)
# self._new_adb() # This is now done on feature_insert
return 1
def _gen_adb_path(self):
"""
::
Name a new adb instance
"""
if not self.collection_path:
print("Error: self.collection_path not set")
raise error.BregmanError()
adb_path = self.collection_path + os.sep + self.collection_stem + self._gen_adb_hash() +".adb"
return adb_path
def _gen_collection_path(self, name_prefix):
"""
::
Make a new unique directory in the self.root_path directory
"""
self.collection_path = tempfile.mkdtemp(prefix=name_prefix,dir=self.root_path)
if not self.collection_path:
print("Error making new directory in location: ", self.root_path)
return 0
shutil.copymode(self.root_path, self.collection_path) # set permissions
return 1
def _new_adb(self):
"""
::
Make a new audioDB instance in the adb_path location
Make database L2norm and Power compliant
"""
self.adb_path = self._gen_adb_path()
        if self.adb_path is None:
print("self.adb_path must have a string value")
raise error.BregmanError()
else:
f = None
try:
f = open(self.adb_path,"rb")
except:
print("Making new audioDB database: ", self.adb_path)
finally:
if f:
f.close()
print("AudioDB database already exists: ", self.adb_path)
return 0
# create a NEW audiodb database instance
command = ["audioDB", "--NEW", "-d", self.adb_path, "--datasize", "%d"%self.adb_data_size, "--ntracks", "%d"%self.adb_ntracks]
self._do_subprocess(command)
# make L2NORM compliant
command = ["audioDB", "--L2NORM", "-d", self.adb_path]
self._do_subprocess(command)
# make POWER compliant
command = ["audioDB", "--POWER", "-d", self.adb_path]
self._do_subprocess(command)
return 1
def _do_subprocess(self,command):
"""
::
Call an external (shell) command, inform about any errors
"""
res = subprocess.call(command)
if res:
print("Error in ", command)
raise error.BregmanError()
return res
def load(self, path=None):
"""
::
Load stored data for this collection
"""
pass
def save(self):
"""
::
Save data for this collection
"""
pass
def toc(self, collection_path=None):
"""
::
List contents of this collection, or collection at collection_path.
"""
        if collection_path is None:
collection_path = self.collection_path
dlist=glob.glob(collection_path + os.sep + "*.data")
toclist = []
for d in dlist:
self.load(d)
toclist.append(self.feature_params)
return zip(dlist,toclist)
@classmethod
def lc(cls, expand=False, limit=None):
"""
::
Alias for ls_collections()
"""
return cls.ls_collections(expand, limit)
@classmethod
def ls_collections(cls, expand=False, limit=None):
"""
::
For the given class, return a list of instances
        If expand is set to True, print each collection's TOC
"""
dlist, tm = cls._get_collection_list_by_time()
dlist = zip(dlist[:limit], tm[:limit])
if expand:
R = cls()
k = R.toc(dlist[0][0])
for d,t in dlist:
print(d, t)
print(k[0][1].keys())
for i, v in enumerate(R.toc(d)):
print("[%d]"%i, v[1].values())
print("")
return dlist
@classmethod
def _get_collection_list_by_time(cls):
dlist = glob.glob(DATA_ROOT + os.sep + cls.collection_stem + "*")
# sort into descending order of time
tm = {}
for d in dlist: tm[ d ] = os.path.getmtime( d )
dlist.sort( lambda x,y: cmp( tm[x], tm[y] ) )
dlist.reverse()
tm = [time.ctime(tm[d]) for d in dlist]
return (dlist, tm)
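# A minimal usage sketch appended for illustration (not part of the original Bregman
# module). Paths are placeholders, and the external "fftExtract" and "audioDB"
# command-line tools must be installed for extraction and insertion to succeed.
if __name__ == "__main__":
    c = AudioCollection()                                   # collection rooted at DATA_ROOT
    c.insert_audio_files(["/path/to/a.wav", "/path/to/b.wav"])  # queue audio files
    if c.initialize():                                      # make a new collection directory
        c.extract_features()                                # extract features, insert into audioDB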
|
# Copyright 2020 Tier IV, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
import launch
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer, LoadComposableNodes
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
launch_arguments = []
def add_launch_arg(name: str, default_value=None):
# a default_value of None is equivalent to not passing that kwarg at all
launch_arguments.append(DeclareLaunchArgument(name, default_value=default_value))
ssd_fine_detector_share_dir = get_package_share_directory(
'traffic_light_ssd_fine_detector'
)
classifier_share_dir = get_package_share_directory(
'traffic_light_classifier'
)
add_launch_arg('enable_fine_detection', 'True')
add_launch_arg('input/image', '/sensing/camera/traffic_light/image_raw')
# traffic_light_ssd_fine_detector
add_launch_arg('onnx_file',
os.path.join(ssd_fine_detector_share_dir, 'data', 'mb2-ssd-lite-tlr.onnx'))
add_launch_arg('label_file',
os.path.join(ssd_fine_detector_share_dir, 'data', 'voc_labels_tl.txt'))
add_launch_arg('fine_detector_precision', 'FP32')
add_launch_arg('score_thresh', '0.7')
add_launch_arg('max_batch_size', '8')
add_launch_arg('approximate_sync', 'False')
add_launch_arg('mean', '[0.5, 0.5, 0.5]')
add_launch_arg('std', '[0.5, 0.5, 0.5]')
# traffic_light_classifier
add_launch_arg('classifier_type', '1')
add_launch_arg('model_file_path',
os.path.join(classifier_share_dir,
'data',
'traffic_light_classifier_mobilenetv2.onnx'))
add_launch_arg('label_file_path',
os.path.join(classifier_share_dir, 'data', 'lamp_labels.txt'))
add_launch_arg('precision', 'fp16')
add_launch_arg('input_c', '3')
add_launch_arg('input_h', '224')
add_launch_arg('input_w', '224')
def create_parameter_dict(*args):
result = {}
for x in args:
result[x] = LaunchConfiguration(x)
return result
container = ComposableNodeContainer(
name='traffic_light_node_container',
namespace='/perception/traffic_light_recognition',
package='rclcpp_components',
executable='component_container',
composable_node_descriptions=[
ComposableNode(
package='traffic_light_classifier',
plugin='traffic_light::TrafficLightClassifierNodelet',
name='traffic_light_classifier',
parameters=[create_parameter_dict('approximate_sync', 'classifier_type',
'model_file_path', 'label_file_path',
'precision', 'input_c', 'input_h', 'input_w')],
remappings=[('input/image', LaunchConfiguration('input/image')),
('input/rois', 'rois'),
('output/traffic_light_states', 'traffic_light_states')]
),
ComposableNode(
package='traffic_light_visualization',
plugin='traffic_light::TrafficLightRoiVisualizerNodelet',
name='traffic_light_roi_visualizer',
parameters=[create_parameter_dict('enable_fine_detection')],
remappings=[('input/image', LaunchConfiguration('input/image')),
('input/rois', 'rois'),
('input/rough/rois', 'rough/rois'),
('input/traffic_light_states', 'traffic_light_states'),
('output/image', 'debug/rois'),
('output/image/compressed', 'debug/rois/compressed'),
('output/image/compressedDepth', 'debug/rois/compressedDepth'),
('output/image/theora', 'debug/rois/theora')]
)
],
output='both',
)
ssd_fine_detector_param = create_parameter_dict('onnx_file', 'label_file',
'score_thresh', 'max_batch_size',
'approximate_sync', 'mean', 'std')
ssd_fine_detector_param['mode'] = LaunchConfiguration('fine_detector_precision')
loader = LoadComposableNodes(
composable_node_descriptions=[
ComposableNode(
package='traffic_light_ssd_fine_detector',
plugin='traffic_light::TrafficLightSSDFineDetectorNodelet',
name='traffic_light_ssd_fine_detector',
parameters=[ssd_fine_detector_param],
remappings=[('input/image', LaunchConfiguration('input/image')),
('input/rois', 'rough/rois'),
('output/rois', 'rois')]
),
],
target_container=container,
condition=launch.conditions.IfCondition(LaunchConfiguration('enable_fine_detection')),
)
return LaunchDescription(launch_arguments + [container, loader])
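# Hypothetical usage note (not from the original file): a launch file like this is
# typically started with "ros2 launch <package_name> <this_file>.launch.py", or included
# from another launch file; the package name and file path below are placeholders.
#
#     from launch.actions import IncludeLaunchDescription
#     from launch.launch_description_sources import PythonLaunchDescriptionSource
#
#     IncludeLaunchDescription(
#         PythonLaunchDescriptionSource('/path/to/traffic_light_node_container.launch.py'),
#         launch_arguments={'enable_fine_detection': 'True'}.items(),
#     )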
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json
import codecs
from urllib.request import Request, urlopen
from urllib.error import HTTPError
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%H %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
ATTR_RESET = '\033[0m'
ATTR_PR = '\033[1;36m'
COMMIT_FORMAT = '%C(bold blue)%H%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
'''
Get named configuration option from git repository.
'''
try:
return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
except subprocess.CalledProcessError:
return default
def get_response(req_url, ghtoken):
req = Request(req_url)
if ghtoken is not None:
req.add_header('Authorization', 'token ' + ghtoken)
return urlopen(req)
def retrieve_json(req_url, ghtoken, use_pagination=False):
'''
Retrieve json from github.
Return None if an error happens.
'''
try:
reader = codecs.getreader('utf-8')
if not use_pagination:
return json.load(reader(get_response(req_url, ghtoken)))
obj = []
page_num = 1
while True:
req_url_page = '{}?page={}'.format(req_url, page_num)
result = get_response(req_url_page, ghtoken)
obj.extend(json.load(reader(result)))
link = result.headers.get('link', None)
if link is not None:
link_next = [l for l in link.split(',') if 'rel="next"' in l]
if len(link_next) > 0:
page_num = int(link_next[0][link_next[0].find("page=")+5:link_next[0].find(">")])
continue
break
return obj
except HTTPError as e:
error_message = e.read()
print('Warning: unable to retrieve pull information from github: %s' % e)
print('Detailed error: %s' % error_message)
return None
except Exception as e:
print('Warning: unable to retrieve pull information from github: %s' % e)
return None
def retrieve_pr_info(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull
return retrieve_json(req_url,ghtoken)
def retrieve_pr_comments(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/issues/"+pull+"/comments"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def retrieve_pr_reviews(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull+"/reviews"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def ask_prompt(text):
print(text,end=" ",file=stderr)
stderr.flush()
reply = stdin.readline().rstrip()
print("",file=stderr)
return reply
def get_symlink_files():
files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
ret = []
for f in files:
if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
ret.append(f.decode('utf-8').split("\t")[1])
return ret
def tree_sha512sum(commit='HEAD'):
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert(metadata[1] == b'blob')
name = line[name_sep+1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert(reply[0] == blob and reply[1] == b'blob')
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
def get_acks_from_comments(head_commit, comments):
assert len(head_commit) == 6
ack_str ='\n\nACKs for commit {}:\n'.format(head_commit)
for c in comments:
review = [l for l in c['body'].split('\r\n') if 'ACK' in l and head_commit in l]
if review:
ack_str += ' {}:\n'.format(c['user']['login'])
ack_str += ' {}\n'.format(review[0])
return ack_str
def print_merge_details(pull, title, branch, base_branch, head_branch):
print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments():
epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory),
user.signingkey (mandatory),
    user.ghtoken (default: none),
githubmerge.host (default: git@github.com),
githubmerge.branch (no default),
githubmerge.testcmd (default: none).
'''
parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
epilog=epilog)
parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
help='Pull request ID to merge')
parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
return parser.parse_args()
def main():
# Extract settings from git repo
repo = git_config_get('githubmerge.repository')
host = git_config_get('githubmerge.host','git@github.com')
opt_branch = git_config_get('githubmerge.branch',None)
testcmd = git_config_get('githubmerge.testcmd')
ghtoken = git_config_get('user.ghtoken')
signingkey = git_config_get('user.signingkey')
if repo is None:
print("ERROR: No repository configured. Use this command to set:", file=stderr)
print("git config githubmerge.repository <owner>/<repo>", file=stderr)
sys.exit(1)
if signingkey is None:
print("ERROR: No GPG signing key set. Set one using:",file=stderr)
print("git config --global user.signingkey <key>",file=stderr)
sys.exit(1)
if host.startswith(('https:','http:')):
host_repo = host+"/"+repo+".git"
else:
host_repo = host+":"+repo
# Extract settings from command line
args = parse_arguments()
pull = str(args.pull[0])
# Receive pull information from github
info = retrieve_pr_info(repo,pull,ghtoken)
if info is None:
sys.exit(1)
comments = retrieve_pr_comments(repo,pull,ghtoken) + retrieve_pr_reviews(repo,pull,ghtoken)
if comments is None:
sys.exit(1)
title = info['title'].strip()
body = info['body'].strip()
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting
# - base branch for pull (as retrieved from github)
# - 'master'
branch = args.branch or opt_branch or info['base']['ref'] or 'master'
# Initialize source branches
head_branch = 'pull/'+pull+'/head'
base_branch = 'pull/'+pull+'/base'
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
devnull = open(os.devnull, 'w', encoding="utf8")
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
'+refs/heads/'+branch+':refs/heads/'+base_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError:
print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError:
print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
subprocess.check_call([GIT,'checkout','-q',base_branch])
subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
try:
# Go up to the repository's root.
toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
os.chdir(toplevel)
# Create unsigned merge commit.
if title:
firstline = 'Merge #%s: %s' % (pull,title)
else:
firstline = 'Merge #%s' % (pull,)
message = firstline + '\n\n'
message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%H %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
message += get_acks_from_comments(head_commit=subprocess.check_output([GIT,'log','-1','--pretty=format:%H',head_branch]).decode('utf-8')[:6], comments=comments)
try:
subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','--no-gpg-sign','-m',message.encode('utf-8'),head_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot be merged cleanly.",file=stderr)
subprocess.check_call([GIT,'merge','--abort'])
sys.exit(4)
logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
if logmsg.rstrip() != firstline.rstrip():
print("ERROR: Creating merge failed (already merged?).",file=stderr)
sys.exit(4)
symlink_files = get_symlink_files()
for f in symlink_files:
print("ERROR: File %s was a symlink" % f)
if len(symlink_files) > 0:
sys.exit(4)
# Put tree SHA512 into the message
try:
first_sha512 = tree_sha512sum()
message += '\n\nTree-SHA512: ' + first_sha512
except subprocess.CalledProcessError:
print("ERROR: Unable to compute tree hash")
sys.exit(4)
try:
subprocess.check_call([GIT,'commit','--amend','--no-gpg-sign','-m',message.encode('utf-8')])
except subprocess.CalledProcessError:
print("ERROR: Cannot update message.", file=stderr)
sys.exit(4)
print_merge_details(pull, title, branch, base_branch, head_branch)
print()
# Run test command if configured.
if testcmd:
if subprocess.call(testcmd,shell=True):
print("ERROR: Running %s failed." % testcmd,file=stderr)
sys.exit(5)
# Show the created merge.
diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
if diff:
print("WARNING: merge differs from github!",file=stderr)
reply = ask_prompt("Type 'ignore' to continue.")
if reply.lower() == 'ignore':
print("Difference with github ignored.",file=stderr)
else:
sys.exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
print("Type 'exit' when done.",file=stderr)
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([BASH,'-i'])
second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:
print("ERROR: Tree hash changed unexpectedly",file=stderr)
sys.exit(8)
# Sign the merge commit.
print_merge_details(pull, title, branch, base_branch, head_branch)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
sys.exit(1)
# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])
subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
finally:
# Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch])
subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
# Push the result.
while True:
reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
break
elif reply == 'x':
sys.exit(1)
if __name__ == '__main__':
main()
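# Hypothetical invocation sketch (repository, key id, and pull number are placeholders,
# not from the original script): configure the repository and signing key once, then
# merge a pull request by number, optionally naming the destination branch.
#
#     git config githubmerge.repository <owner>/<repo>
#     git config --global user.signingkey <key-id>
#     ./github-merge.py 12345            # or: ./github-merge.py 12345 master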
|
#!/usr/bin/env python
'''
So I heard you want to use OAuth2? This is a helper tool that gets the
authentication code for you and fires it into praw.ini.
How to use:
- Visit: https://www.reddit.com/prefs/apps
- Create new "script", under "redirect uri" put http://127.0.0.1:65010
- Open praw.ini
- oauth_client_id = { The ID displayed under the icon thingy }
- oauth_client_secret = { The secret }
- oauth_redirect_uri = http://127.0.0.1:65010
- Run this script
- Your browser will open to a page on Reddit listing requested perms
- Click permit
'''
import praw
import webbrowser
from warnings import warn
from praw.errors import HTTPException, OAuthAppRequired
from tornado import gen, web
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
r = praw.Reddit('Shreddit refresh token grabber')
class Page(web.RequestHandler):
def get(self):
code = self.get_argument("code", default=None, strip=False)
self.write("Success! Your code: %s<br> \
It will now be appended to praw.ini and you \
should be able to enjoy Shreddit without storing \
your user / pass anywhere." % code)
IOLoop.current().stop()
self.login(code)
def login(self, code):
deets = r.get_access_information(code)
print("oauth_refresh_token: %s" % deets['refresh_token'])
r.set_access_credentials(**deets)
with open('praw.ini', mode='a') as fh:
fh.write('oauth_refresh_token = %s' % deets['refresh_token'])
print("Refresh token written to praw.ini")
application = web.Application([(r"/", Page)])
try:
r.refresh_access_information()
except HTTPException:
url = r.get_authorize_url('uniqueKey', ['identity', 'read', 'submit', 'vote', 'edit', 'history'], True)
try:
print("Opening url: %s" % url)
webbrowser.open(url, new=2)
except NameError:
warn('''Couldn't open URL: %s\n please do so manually''' % url)
server = HTTPServer(application)
server.listen(65010)
IOLoop.current().start()
if r.user is None:
print("Failed to log in. Something went wrong!")
else:
print("Logged in as %s." % r.user)
print()
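# Example praw.ini entries this helper expects, per the docstring above (the values are
# placeholders; the oauth_refresh_token line is appended by this script on success):
#
#     oauth_client_id = your_client_id
#     oauth_client_secret = your_client_secret
#     oauth_redirect_uri = http://127.0.0.1:65010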
|
import argparse
import itertools
import json
from pathlib import Path
from typing import Any, Dict, List, TextIO
from allennlp.data.vocabulary import DEFAULT_PADDING_TOKEN
from allennlp.predictors import Predictor, CorefPredictor
import spacy
from spacy.matcher import Matcher
from spacy.tokens import Doc, Token
from tqdm import tqdm
SENTS_PER_EXAMPLE = 3
SEQUENCE_FIELDS = [
'tokens',
'start_chars',
'end_chars',
'tags',
'ner_ids',
'relation_roots',
'relations'
]
SPACY_PATTERNS = [
{
'pattern_flag': 'is_reference',
'patterns': [
[{'IS_DIGIT': False}, {'LOWER': '.'}, {'LOWER': 'this'}, {'OP': '+'}, {'LOWER': 'is'}, {'LOWER': 'called'}],
[{'IS_DIGIT': False}, {'LOWER': '.'}, {'LOWER': 'these'}, {'OP': '+'}, {'LOWER': 'are'}, {'LOWER': 'called'}],
]
},
{
'pattern_flag': 'is_definition',
'patterns': [
[{'POS': {'IN': ['NOUN', 'ADJ', 'DET']}, 'OP': '+'}, {'LOWER': 'is'}, {'LOWER': {'IN': ['a', 'the']}}]
],
},
{
'pattern_flag': 'is_alias',
'patterns': [
[{'LOWER': {'IN': [',', '(']}, 'OP': '?'}, {'LOWER': {'IN': ['formerly', 'commonly', 'also']}}, {'LOWER': 'called'}],
[{'LOWER': {'IN': [',', '(']}, 'OP': '?'}, {'LOWER': {'IN': ['formerly', 'commonly', 'also']}}, {'LOWER': 'known'}, {'LOWER': 'as'}]
],
},
{
'pattern_flag': 'noisy_alias',
'patterns': [
[{'LOWER': '('}, {'LOWER': 'or', 'OP': '?'}, {'POS': {'IN': ['DET', 'ADJ', 'NOUN']}}, {'LOWER': ')'}],
],
},
{
'pattern_flag': 'reference_keywords',
'patterns': [
[{'LOWER': '.'}, {'LOWER': {'IN': ['it', 'they', 'these', 'those']}}]
],
},
]
def main():
"""Script to convert a folder of deft formatted files into a jsonl file"""
parser = argparse.ArgumentParser('Convert deft format to jsonl')
parser.add_argument('input', help='Folder with deft files')
parser.add_argument('output', help='Jsonl output file')
parser.add_argument('-f', dest='force_output', action='store_true',
help='force creation of a new output file')
parser.add_argument('--cuda_device', type=int, default=-1,
help='Cuda device to use in preprocessing steps')
args = parser.parse_args()
output_file = Path(args.output)
if output_file.exists():
assert not output_file.is_dir(), 'Output must be a file'
assert args.force_output, 'Output file already exists'
input_path = Path(args.input)
assert input_path.exists() and input_path.is_dir()
with output_file.open('w') as output_file_handler:
_convert_deft_folder(input_path, output_file_handler, cuda_device=args.cuda_device)
def _convert_deft_folder(input_path: Path,
output_file: TextIO,
cuda_device: int,
with_spacy: bool = True,
with_coref: bool = True) -> None:
"""Convert all files in the given folder."""
if with_spacy:
spacy_pipeline = spacy.load('en_core_web_lg')
spacy_matcher = Matcher(spacy_pipeline.vocab)
for pattern in SPACY_PATTERNS:
flag = pattern['pattern_flag']
Token.set_extension(flag, default=False, force=True)
patterns = pattern['patterns']
callback_fn = _get_extension_labeling_fn(flag)
spacy_matcher.add(flag, callback_fn, *patterns)
else:
spacy_pipeline = None
spacy_matcher = None
if with_coref:
coref_predictor = Predictor.from_path(
archive_path="https://s3-us-west-2.amazonaws.com/allennlp/models/coref-model-2018.02.05.tar.gz",
cuda_device=cuda_device
)
        # Fix issues with token character sequences shorter than the largest convolution filter size
coref_predictor._dataset_reader._token_indexers['token_characters']._min_padding_length = 5
else:
coref_predictor = None
for input_file in tqdm(input_path.iterdir()):
examples = _convert_deft_file(input_file,
spacy_pipeline=spacy_pipeline,
spacy_matcher=spacy_matcher,
coref_predictor=coref_predictor)
for example in examples:
output_file.write(json.dumps(example) + '\n')
def _convert_deft_file(input_file: Path,
spacy_pipeline=None,
spacy_matcher=None,
coref_predictor: CorefPredictor=None) -> List[Dict[str, Any]]:
"""Converts a deft file into jsonl format and writes to the output file"""
examples = []
example_count = 0
with input_file.open() as file_handler:
while _peek_line(file_handler).strip() == '':
file_handler.readline() # Remove empty newlines at the beginning of a file
while True:
next_line = _peek_line(file_handler)
if '\n' not in next_line:
break
example = _parse_example(file_handler,
spacy_pipeline=spacy_pipeline,
spacy_matcher=spacy_matcher,
coref_predictor=coref_predictor)
example['id'] = f'{input_file.name}##{example_count}'
examples.append(example)
example_count += 1
return examples
def _parse_example(file_handler: TextIO,
spacy_pipeline=None,
spacy_matcher=None,
coref_predictor: CorefPredictor = None) -> Dict:
"""Parses an example and does some pre-processing"""
sentences = _parse_example_sentences(file_handler)
example = {}
# Flatten list fields into a single list
for field in SEQUENCE_FIELDS:
if field in sentences[0]:
example[field] = [i for s in sentences for i in s[field]]
if spacy_pipeline is not None:
doc = _spacy_processing(example, spacy_pipeline)
example['spacy_pos'] = [t.pos_ for t in doc]
example['spacy_tag'] = [t.tag_ for t in doc]
example['spacy_dep_head'] = [t.head.i for t in doc]
example['spacy_dep_rel'] = [t.dep_ for t in doc]
spacy_matcher(doc)
for pattern in SPACY_PATTERNS:
flag = pattern['pattern_flag']
example[f'spacy_pattern_{flag}'] = [t._.get(flag) for t in doc]
if coref_predictor is not None:
doc_tokens = example['tokens']
doc_len = len(doc_tokens)
padded_doc_tokens = doc_tokens + [DEFAULT_PADDING_TOKEN] * max(0, (5-doc_len))
prediction = coref_predictor.predict_tokenized(padded_doc_tokens)
example['coref_top_spans'] = prediction['top_spans'][:doc_len]
example['coref_predicted_antecedents'] = prediction['predicted_antecedents'][:doc_len]
example['coref_clusters'] = prediction['clusters'][:doc_len]
# Concatenate sentence labels with token spans
if 'sentence_label' in sentences[0]:
sentence_labels = []
token_count = 0
for sentence in sentences:
sentence_length = len(sentence['tokens'])
start_token_idx = token_count
end_token_idx = start_token_idx + sentence_length
token_count += sentence_length
sentence_labels.append({
'label': sentence['sentence_label'],
'start_token_idx': start_token_idx,
'end_token_idx': end_token_idx
})
example['sentence_labels'] = sentence_labels
# Remove relation annotations without head nodes
if 'relations' in example:
existing_head_ids = example['ner_ids']
for token_idx in range(len(example['tokens'])):
head_id = example['relation_roots'][token_idx]
if head_id not in existing_head_ids:
example['relations'][token_idx] = '0'
example['relation_roots'][token_idx] = '-1'
return example
def _spacy_processing(example: Dict, spacy_pipeline) -> Doc:
# Determine if a character had a subsequent space by comparing its character end
# index against the character start index of the next token.
paired_char_offsets = zip(example['end_chars'][:-1],
example['start_chars'][1:])
subsequent_spaces = [char_end != char_start
for char_end, char_start in paired_char_offsets] + [False]
doc = Doc(spacy_pipeline.vocab,
words=example['tokens'],
spaces=subsequent_spaces)
spacy_pipeline.tagger(doc)
spacy_pipeline.parser(doc)
return doc
def _peek_line(file_handler) -> str:
"""Peeks into the file returns the next line"""
current_pos = file_handler.tell()
line = file_handler.readline()
file_handler.seek(current_pos)
return line
def _parse_example_sentences(file_handler: TextIO) -> Dict:
"""Parses up to three sentences from the given file handler"""
sentences = []
for sentence_count in range(SENTS_PER_EXAMPLE):
sentence = _parse_sentence(file_handler)
assert len(sentence['tokens']) > 0
sentences.append(sentence)
next_line = _peek_line(file_handler)
if next_line.strip() == '' and sentence_count < 2:
            break  # Break for incomplete triples
# Read the empty separator line if present
next_line = _peek_line(file_handler)
if next_line.strip() == '':
file_handler.readline()
num_sentences = len(sentences)
assert 0 < num_sentences < 4, f'invalid sent len: {num_sentences}'
return sentences
def _parse_sentence(input_file: TextIO) -> Dict[str, Any]:
"""Parses all lines of the current sentence into a deft sentence object"""
sentence = {
'tokens': [],
'start_chars': [],
'end_chars': [],
}
sentence_label = None
tags = []
ner_ids = []
relation_roots = []
relations = []
line = input_file.readline()
while line != '':
if line.strip() == '':
if _is_chapter_start(sentence):
line = input_file.readline()
continue # End of a chapter, remove the newline and continue
break # End of sentence, stop parsing.
split_line = [l.strip() for l in line.strip().split('\t')]
# assert len(split_line) == 8, 'invalid line format: {}'.format(line)
assert len(split_line) >= 4, 'invalid line format: {}'.format(line)
sentence['tokens'].append(split_line[0])
sentence['start_chars'].append(split_line[2])
sentence['end_chars'].append(split_line[3])
if len(split_line) > 4:
tag = split_line[4]
tags.append(tag)
if tag[2:] == 'Definition':
sentence_label = 'HasDef'
elif sentence_label is None:
sentence_label = 'NoDef'
if len(split_line) > 5:
ner_ids.append(split_line[5])
relation_roots.append(split_line[6])
relations.append(split_line[7])
line = input_file.readline()
if sentence_label is not None:
sentence['sentence_label'] = sentence_label
if len(tags) > 0:
sentence['tags'] = tags
if len(ner_ids) > 0:
sentence['ner_ids'] = ner_ids
if len(relation_roots) > 0:
sentence['relation_roots'] = relation_roots
if len(relations) > 0:
sentence['relations'] = relations
return sentence
def _extract_entities(sentences: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Aggregates entity information into a separate dictionary."""
entities = []
ner_info = [info_tuple
for s in sentences
for info_tuple in zip(s['tokens'],
s['tags'],
s['ner_ids'])]
filtered_tokens = [i for i in enumerate(ner_info) if i[1][1] != 'O']
for entity_id, entity_group in itertools.groupby(filtered_tokens,
key=lambda x: x[1][2]):
token_offsets, ner_tuples = zip(*entity_group)
_, tags, ner_ids = zip(*ner_tuples)
assert ner_ids[0] == entity_id, "{} != {}".format(ner_ids[0], entity_id)
assert ner_ids[0] != '-1'
# Detect issues in the task2 tags annotations
tags = list(tags)
if not tags[0].startswith('B-'):
print('incorrect starting tag: {} of {} in {}'
.format(tags[0], entity_id, sentences[0]['id']))
tags[0] = 'B-' + tags[0]
for i in range(1, len(tags)):
if not tags[i].startswith('I-'):
print('incorrect intermediate tag: {} of {} in {}'
.format(tags[i],
entity_id,
sentences[0]['id']))
tags[i] = 'I-' + tags[i]
for i in range(1, len(tags)):
assert tags[i].startswith('I-')
assert tags[0].startswith('B-')
entity_type = tags[0][2:]
start_token = token_offsets[0]
end_token = token_offsets[-1] + 1
entities.append({
'id': entity_id,
'entity_type': entity_type,
'start_token': start_token,
'end_token': end_token
})
return entities
def _entity_id_exists(sentences: List[Dict[str, Any]],
expected_entity_id: str) -> bool:
for s in sentences:
for entity_id in s['ner_ids']:
if entity_id == expected_entity_id:
return True
return False
def _extract_relations(sentences: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Aggregates relation information into a separate dictionary."""
relations = []
relation_info = [info_tuple
for s in sentences
for info_tuple in zip(s['ner_ids'],
s['relation_roots'],
s['relations'])]
relation_tokens = [i for i in relation_info if i[1] not in ['-1', '0']]
grouped_relations = itertools.groupby(relation_tokens, key=lambda x: x[0])
for _, relation_group in grouped_relations:
tail_id, head_id, relation_type = next(relation_group)
if not _entity_id_exists(sentences, head_id):
# There are several relation annotations, that are missing the
# head entity. Simply skip these.
# see: https://github.com/adobe-research/deft_corpus/issues/20
# print(sentences[0]['id'], ':', tail_id, head_id, relation_type)
continue
assert _entity_id_exists(sentences, head_id), 'head entity not found'
relations.append({
'head_id': head_id,
'tail_id': tail_id,
'relation_type': relation_type
})
return relations
# Add a generic attribute setter
def _get_extension_labeling_fn(extension_name):
def set_extension(_, doc, i, matches):
match_id, start, end = matches[i]
for token in doc[start:end]:
token._.set(extension_name, True)
return set_extension
def _is_chapter_start(sentence: Dict[str, Any]):
"""
Return true if the sentence only contains a chapter start.
Most examples start with a chapter start, i.e. a digit followed by period
character. This is not supposed to be handled as a separate sentence, but
the sentence splitting seems to have introduced newlines in these cases.
"""
if len(sentence['tokens']) != 2:
return False
return sentence['tokens'][0].isdigit() and sentence['tokens'][1] == '.'
if __name__ == '__main__':
main()
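# Hypothetical command line (script name and paths are placeholders): the script takes a
# folder of deft-formatted files and an output jsonl path, with optional -f to overwrite
# an existing output file and --cuda_device to pick a GPU for the coreference model.
#
#     python convert_deft_to_jsonl.py data/deft_files/ out/train.jsonl -f --cuda_device 0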
|
from mock import PropertyMock, mock
from requests import RequestException
from requests.sessions import Session
from coinbase_commerce.api_resources import Charge, Checkout, Event
from coinbase_commerce.client import Client
from coinbase_commerce.error import (
APIError,
AuthenticationError,
InternalServerError,
InvalidRequestError,
ParamRequiredError,
RateLimitExceededError,
ResourceNotFoundError,
ServiceUnavailableError,
ValidationError,
)
from coinbase_commerce.response import CoinbaseResponse
from tests.base_test_case import BaseTestCase
class TestApiClient(BaseTestCase):
def test_init(self):
api_key, base_url, api_version = TestApiClient.mock_client_params()
client = Client(api_key, base_url, api_version)
self.assertEqual(client.API_VERSION, api_version)
self.assertEqual(client.BASE_API_URI, base_url)
self.assertIsInstance(client.session, Session)
@mock.patch('requests.session')
def test_api_session(self, mocked_session):
api_key, base_url, api_version = TestApiClient.mock_client_params()
with Client(api_key, base_url, api_version):
pass
self.assertEqual(mocked_session.call_count, 1)
self.assertEqual(mocked_session.return_value.close.call_count, 1)
client = Client(api_key, base_url, api_version)
self.assertEqual(mocked_session.call_count, 2)
client.close()
self.assertEqual(mocked_session.return_value.close.call_count, 2)
def test_checkout_relation(self):
client = TestApiClient.mock_client()
checkout = client.checkout
checkout2 = client.checkout
self.assertTrue(hasattr(client, 'checkout'))
self.assertIs(checkout, Checkout)
self.assertIs(checkout, checkout2)
def test_charge_relation(self):
client = TestApiClient.mock_client()
charge = client.charge
charge2 = client.charge
self.assertTrue(hasattr(client, 'charge'))
self.assertIs(charge, Charge)
self.assertIs(charge, charge2)
def test_event_relation(self):
client = TestApiClient.mock_client()
event = client.event
event2 = client.event
self.assertTrue(hasattr(client, 'event'))
self.assertIs(event, Event)
self.assertIs(event, event2)
def test_response_class(self):
client = TestApiClient.mock_client()
self.stub_request('get', 'foo', {})
resp = client._request('get', 'foo')
self.assertTrue(isinstance(resp, CoinbaseResponse))
def test_handle_exception(self):
client = TestApiClient.mock_client()
with self.assertRaises(APIError) as e:
client._handle_request_error(Exception)
self.assertIn("probably a configuration issue locally",
e.exception.args[0])
def test_handle_request_exception(self):
client = TestApiClient.mock_client()
with self.assertRaises(APIError) as e:
client._handle_request_error(RequestException())
self.assertIn('Network error: RequestException',
e.exception.args[0])
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_invalid_request_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.status_code = PropertyMock(return_value=400)
client = TestApiClient.mock_client()
with self.assertRaises(InvalidRequestError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_authentication_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.status_code = PropertyMock(return_value=401)
client = TestApiClient.mock_client()
with self.assertRaises(AuthenticationError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_resource_not_found_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.status_code = PropertyMock(return_value=404)
client = TestApiClient.mock_client()
with self.assertRaises(ResourceNotFoundError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_rate_limit_exceeded_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.content = PropertyMock(
return_value='{"error": {}}'
)
mock.MagicMock.status_code = PropertyMock(return_value=429)
client = TestApiClient.mock_client()
with self.assertRaises(RateLimitExceededError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_internal_server_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.content = PropertyMock(
return_value='{"error": {}}'
)
mock.MagicMock.status_code = PropertyMock(return_value=500)
client = TestApiClient.mock_client()
with self.assertRaises(InternalServerError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_service_unavailable_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.content = PropertyMock(return_value='{}')
mock.MagicMock.status_code = PropertyMock(return_value=503)
client = TestApiClient.mock_client()
with self.assertRaises(ServiceUnavailableError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_param_required_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.content = PropertyMock(
return_value='{"error": {"type": "param_required"}}'
)
client = TestApiClient.mock_client()
with self.assertRaises(ParamRequiredError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_validation_error(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=False)
mock.MagicMock.content = PropertyMock(
return_value='{"error": {"type": "validation_error"}}'
)
client = TestApiClient.mock_client()
with self.assertRaises(ValidationError):
client._request('get', 'foo')
@mock.patch('requests.session', side_effect=mock.MagicMock)
def test_valid_response_proceed(self, session_mock):
mock.MagicMock.ok = PropertyMock(return_value=True)
mock.MagicMock.content = '{"foo":"baz"}'
mock.MagicMock.body = 'bar'
mock.MagicMock.status_code = 200
mock.MagicMock.headers = {}
client = TestApiClient.mock_client()
resp = client._proceed_response(mock.MagicMock)
self.assertIsInstance(resp, CoinbaseResponse)
@staticmethod
def mock_client():
api_key, base_url, api_version = TestApiClient.mock_client_params()
return Client(api_key, base_url, api_version)
@staticmethod
def mock_client_params():
api_key = 'testapikey'
base_url = 'https://base-url.com'
api_version = '2018-03-22'
return api_key, base_url, api_version
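# A hedged note on running these tests: assuming BaseTestCase extends unittest.TestCase
# and stubs HTTP responses via self.stub_request, the suite can be run with a standard
# test runner, for example:
#
#     python -m unittest discover tests
#     # or: pytest tests/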
|
# -*- coding: utf-8 -*-
""" Comparison of the accuracy and computational speed of different load
aggregation algorithms.
The g-function of a single borehole is calculated for boundary condition of
uniform borehole wall temperature along the borehole. Then, the borehole
wall temperature variations resulting from a time-varying load profile
are simulated using the aggregation methods of Bernier et al. (2004),
Liu (2005), and Claesson and Javed (2012). Results are compared to the
exact solution obtained by convolution in the Fourier domain.
Default parameters are used for each of the aggregation schemes.
"""
from __future__ import absolute_import, division, print_function
import time as tim
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import AutoMinorLocator
from scipy.constants import pi
from scipy.interpolate import interp1d
from scipy.signal import fftconvolve
import pygfunction as gt
def main():
# -------------------------------------------------------------------------
# Simulation parameters
# -------------------------------------------------------------------------
# Borehole dimensions
D = 4.0 # Borehole buried depth (m)
H = 150.0 # Borehole length (m)
r_b = 0.075 # Borehole radius (m)
# Ground properties
alpha = 1.0e-6 # Ground thermal diffusivity (m2/s)
k_s = 2.0 # Ground thermal conductivity (W/m.K)
T_g = 10.0 # Undisturbed ground temperature (degC)
# Number of segments per borehole
nSegments = 12
# Simulation parameters
dt = 3600. # Time step (s)
tmax = 20.*8760. * 3600. # Maximum time (s)
Nt = int(np.ceil(tmax/dt)) # Number of time steps
# Load aggregation schemes
ClaessonJaved = gt.load_aggregation.ClaessonJaved(dt, tmax)
MLAA = gt.load_aggregation.MLAA(dt, tmax)
Liu = gt.load_aggregation.Liu(dt, tmax)
LoadAggSchemes = [ClaessonJaved, MLAA, Liu]
loadAgg_labels = ['Claesson and Javed', 'MLAA', 'Liu']
loadAgg_lines = ['b-', 'k--', 'r-.']
# -------------------------------------------------------------------------
# Calculate g-function
# -------------------------------------------------------------------------
# The field contains only one borehole
boreField = [gt.boreholes.Borehole(H, D, r_b, x=0., y=0.)]
# Evaluate the g-function on a geometrically expanding time grid
time_gFunc = gt.utilities.time_geometric(dt, tmax, 50)
# Calculate g-function
print('Calculation of the g-function ...')
gFunc = gt.gfunction.uniform_temperature(boreField, time_gFunc, alpha,
nSegments=nSegments)
# -------------------------------------------------------------------------
# Simulation
# -------------------------------------------------------------------------
nLoadAgg = len(LoadAggSchemes)
T_b = np.zeros((nLoadAgg, Nt))
Q = np.zeros(Nt)
t_calc = np.zeros(nLoadAgg)
for n in range(nLoadAgg):
print('Simulation using {} ...'.format(loadAgg_labels[n]))
# Select aggregation scheme
LoadAgg = LoadAggSchemes[n]
# Interpolate g-function at required times
time_req = LoadAgg.get_times_for_simulation()
gFunc_int = interp1d(np.hstack([0., time_gFunc]),
np.hstack([0., gFunc]),
kind='cubic',
bounds_error=False,
fill_value=(0., gFunc[-1]))(time_req)
# Initialize load aggregation scheme
LoadAgg.initialize(gFunc_int/(2*pi*k_s))
tic = tim.time()
time = 0.
i = -1
while time < tmax:
            # Increment time by one time step
time += dt
i += 1
LoadAgg.next_time_step(time)
# Evaluate heat extraction rate
Q[i] = synthetic_load(time/3600.)
# Apply current load
LoadAgg.set_current_load(Q[i]/H)
            # Evaluate borehole wall temperature
deltaT_b = LoadAgg.temporal_superposition()
T_b[n,i] = T_g - deltaT_b
toc = tim.time()
t_calc[n] = toc - tic
# -------------------------------------------------------------------------
# Calculate exact solution from convolution in the Fourier domain
# -------------------------------------------------------------------------
# Heat extraction rate increment
dQ = np.zeros(Nt)
dQ[0] = Q[0]
# Interpolated g-function
time = np.array([(j+1)*dt for j in range(Nt)])
g = interp1d(time_gFunc, gFunc)(time)
for i in range(1, Nt):
dQ[i] = Q[i] - Q[i-1]
# Convolution in Fourier domain
T_b_exact = T_g - fftconvolve(dQ, g/(2.0*pi*k_s*H), mode='full')[0:Nt]
# -------------------------------------------------------------------------
# plot results
# -------------------------------------------------------------------------
plt.rc('figure')
fig = plt.figure()
ax1 = fig.add_subplot(311)
# Axis labels
ax1.set_xlabel(r'$t$ (hours)')
ax1.set_ylabel(r'$Q$ (W)')
hours = np.array([(j+1)*dt/3600. for j in range(Nt)])
ax1.plot(hours, Q, 'b-', lw=1.5)
ax2 = fig.add_subplot(312)
# Axis labels
ax2.set_xlabel(r'$t$ (hours)')
ax2.set_ylabel(r'$T_b$ (degC)')
for n in range(nLoadAgg):
ax2.plot(hours, T_b[n,:],
loadAgg_lines[n], lw=1.5, label=loadAgg_labels[n])
ax2.plot(hours, T_b_exact, 'k.', lw=1.5, label='exact')
ax2.legend()
ax3 = fig.add_subplot(313)
# Axis labels
ax3.set_xlabel(r'$t$ (hours)')
ax3.set_ylabel(r'Error (degC)')
for n in range(nLoadAgg):
ax3.plot(hours, T_b[n,:] - T_b_exact,
loadAgg_lines[n], lw=1.5, label=loadAgg_labels[n])
# Show minor ticks
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax2.xaxis.set_minor_locator(AutoMinorLocator())
ax2.yaxis.set_minor_locator(AutoMinorLocator())
ax3.xaxis.set_minor_locator(AutoMinorLocator())
ax3.yaxis.set_minor_locator(AutoMinorLocator())
# Adjust to plot window
plt.tight_layout()
# -------------------------------------------------------------------------
# Print performance metrics
# -------------------------------------------------------------------------
# Maximum errors in evaluation of borehole wall temperatures
maxError = np.array([np.max(np.abs(T_b[n,:]-T_b_exact))
for n in range(nLoadAgg)])
# Print results
print('Simulation results')
horizontalLine = '-'*66
for n in range(nLoadAgg):
print(horizontalLine)
print(loadAgg_labels[n])
print()
print('Maximum absolute error : {} degC'.format(maxError[n]))
print('Calculation time : {} seconds'.format(t_calc[n]))
print()
print(horizontalLine)
return
def synthetic_load(x):
"""
Synthetic load profile of Bernier et al. (2004).
Returns load y (in watts) at time x (in hours).
"""
A = 2000.0
B = 2190.0
C = 80.0
D = 2.0
E = 0.01
F = 0.0
G = 0.95
func = (168.0-C)/168.0
for i in [1, 2, 3]:
func += 1.0/(i*pi)*(np.cos(C*pi*i/84.0) - 1.0) \
*(np.sin(pi*i/84.0*(x - B)))
func = func*A*np.sin(pi/12.0*(x - B)) \
*np.sin(pi/4380.0*(x - B))
y = func + (-1.0)**np.floor(D/8760.0*(x - B))*abs(func) \
+ E*(-1.0)**np.floor(D/8760.0*(x - B)) \
/np.sign(np.cos(D*pi/4380.0*(x - F)) + G)
return -np.array([y])
# Main function
if __name__ == '__main__':
main()
|
"""
https://leetcode.com/problems/lru-cache/
Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.
get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
put(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item.
The cache is initialized with a positive capacity.
Follow up:
Could you do both operations in O(1) time complexity?
Example:
LRUCache cache = new LRUCache( 2 /* capacity */ );
cache.put(1, 1);
cache.put(2, 2);
cache.get(1); // returns 1
cache.put(3, 3); // evicts key 2
cache.get(2); // returns -1 (not found)
cache.put(4, 4); // evicts key 1
cache.get(1); // returns -1 (not found)
cache.get(3); // returns 3
cache.get(4); // returns 4
"""
# Two approaches are provided, both inspired by @tusizi in the discussion.
# The first uses an OrderedDict from the Python collections package.
# The second uses a doubly linked list plus a dictionary: the linked list stores the recency order, and the dictionary stores the key-to-node mapping.
# Time complexity: O(1) for both operations; space complexity: O(n). The second approach uses more space than the first.
class LRUCache:
"""
    This first solution uses collections.OrderedDict:
from collections import OrderedDict
def __init__(self, capacity: int):
self.capacity = capacity
self.cache = OrderedDict()
def get(self, key: int) -> int:
if key not in self.cache:
return -1
self.cache.move_to_end(key)
return self.cache[key]
def put(self, key: int, value: int) -> None:
if key in self.cache:
self.cache.move_to_end(key)
self.cache[key] = value
else:
if self.capacity == len(self.cache):
self.cache.popitem(last=False)
self.cache[key] = value
"""
"""
    The solution below uses a doubly linked list and a dict: the dict maps keys to nodes, and the doubly linked list keeps the recency ordering.
"""
def __init__(self, capacity: int):
self.capacity = capacity
self.cache = dict() # key => Node
self.head = Node(0, 0)
self.tail = Node(0, 0)
self.head.next = self.tail
self.tail.prev = self.head
def _delete(self, node):
node.prev.next, node.next.prev = node.next, node.prev
del self.cache[node.key]
def _add(self, key, val):
node = Node(key, val)
self.tail.prev.next, self.tail.prev, node.prev, node.next = node, node, self.tail.prev, self.tail
self.cache[key] = node
def get(self, key: int) -> int:
if key not in self.cache:
return -1
else:
node = self.cache[key]
key, val = node.key, node.val
self._delete(node)
self._add(key, val)
return val
def put(self, key: int, value: int) -> None:
if key in self.cache:
self._delete(self.cache[key])
self._add(key, value)
else:
if len(self.cache) == self.capacity:
self._delete(self.head.next)
self._add(key, value)
class Node:
def __init__(self, key, val):
self.key = key
self.val = val
self.prev = None
self.next = None
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
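# Quick self-check (an addition, not part of the original solution): replay
# the example sequence from the problem statement against the linked-list
# implementation above. Assumes the LRUCache and Node classes in this file.
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1
    cache.put(3, 3)  # evicts key 2
    assert cache.get(2) == -1
    cache.put(4, 4)  # evicts key 1
    assert cache.get(1) == -1
    assert cache.get(3) == 3
    assert cache.get(4) == 4
    print("LRU example sequence passed")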
|
# -*- encoding: utf-8 -*-
"""
HubbleStack Nova plugin for using grep to verify settings in files.
Supports both blacklisting and whitelisting patterns. Blacklisted patterns must
not be found in the specified file. Whitelisted patterns must be found in the
specified file.
This audit module requires yaml data to execute. It will search the local
directory for any .yaml files, and if it finds a top-level 'grep' key, it will
use that data.
Sample YAML data, with inline comments:
grep:
whitelist: # or blacklist
fstab_tmp_partition: # unique ID
data:
CentOS Linux-6: # osfinger grain
- '/etc/fstab': # filename
tag: 'CIS-1.1.1' # audit tag
pattern: '/tmp' # grep pattern
match_output: 'nodev' # string to check for in output of grep command (optional)
match_output_regex: True # whether to use regex when matching output (default: False)
match_output_multiline: False # whether to use multiline flag for regex matching (default: True)
grep_args: # extra args to grep
- '-E'
- '-i'
- '-B2'
match_on_file_missing: True # See (1) below
'*': # wildcard, will be run if no direct osfinger match
- '/etc/fstab':
tag: 'CIS-1.1.1'
pattern: '/tmp'
# The rest of these attributes are optional, and currently not used
description: |
The /tmp directory is intended to be world-writable, which presents a risk
of resource exhaustion if it is not bound to a separate partition.
labels:
- critical
- raiseticket
alert: email
trigger: state
(1) If `match_on_file_missing` is omitted, success/failure will be determined
entirely based on the grep command and other arguments. If it's set to True and
the file is missing, then it will be considered a match (success for whitelist,
failure for blacklist). If it's set to False and the file is missing, then it
will be considered a non-match (success for blacklist, failure for whitelist).
If the file exists, this setting is ignored.
"""
from __future__ import absolute_import
import logging
import fnmatch
import os
import copy
import salt.utils
import salt.utils.platform
import re
from distutils.version import LooseVersion
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
if salt.utils.platform.is_windows():
return False, 'This audit module only runs on linux'
return True
def apply_labels(__data__, labels):
"""
Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
"""
labelled_data = {}
if labels:
labelled_data['grep'] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get('grep', {}):
labelled_test_cases=[]
for test_case in __data__['grep'].get(topkey, []):
# each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data['grep'][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
def audit(data_list, tags, labels, debug=False, **kwargs):
"""
Run the grep audits contained in the YAML files processed by __virtual__
"""
__data__ = {}
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('grep audit __data__:')
log.debug(__data__)
log.debug('grep audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audittype = tag_data['type']
if 'pattern' not in tag_data:
log.error('No pattern found for grep audit {0}, file {1}'
.format(tag, name))
tag_data = copy.deepcopy(tag_data)
tag_data['error'] = 'No pattern found'
tag_data['failure_reason'] = 'No pattern found for the test case.' \
' Seems like a bug in hubble profile.'
ret['Failure'].append(tag_data)
continue
grep_args = tag_data.get('grep_args', [])
if isinstance(grep_args, str):
grep_args = [grep_args]
grep_ret = _grep(name,
tag_data['pattern'],
*grep_args).get('stdout')
found = False
failure_reason = ''
if grep_ret:
found = True
failure_reason = "Found the blacklisted string '{0}' in file '{1}'." \
" The file should not contain any string like '{2}'" \
.format(grep_ret,
name,
tag_data['pattern'])
if 'match_output' in tag_data:
if not tag_data.get('match_output_regex'):
if tag_data['match_output'] not in grep_ret:
found = False
failure_reason = "In file '{0}', could not find text pattern " \
"'{1}' in '{2}'".format(name,
tag_data['match_output'],
grep_ret)
else: # match with regex
if tag_data.get('match_output_multiline', True):
if not re.search(tag_data['match_output'], grep_ret, re.MULTILINE):
found = False
failure_reason = "In file '{0}', could not find multiline" \
" regex pattern '{1}' in '{2}'" \
.format(name,
tag_data['match_output'],
grep_ret)
else:
if not re.search(tag_data['match_output'], grep_ret):
found = False
failure_reason = "In file '{0}', could not find regex " \
"pattern '{1}' in '{2}'" \
.format(name,
tag_data['match_output'],
grep_ret)
if not os.path.exists(name) and 'match_on_file_missing' in tag_data:
if tag_data['match_on_file_missing']:
found = True
failure_reason = "Found the file '{0}'. This blaclisted file " \
"should not exist.".format(name)
else:
found = False
failure_reason = "Could not find the required file '{0}'".format(name)
# Blacklisted pattern (must not be found)
if audittype == 'blacklist':
if found:
tag_data['failure_reason'] = failure_reason
ret['Failure'].append(tag_data)
else:
ret['Success'].append(tag_data)
# Whitelisted pattern (must be found)
elif audittype == 'whitelist':
if found:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = failure_reason
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
"""
Merge two yaml dicts together at the grep:blacklist and grep:whitelist level
"""
if 'grep' not in ret:
ret['grep'] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get('grep', {}):
if topkey not in ret['grep']:
ret['grep'][topkey] = []
for key, val in data['grep'][topkey].items():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['grep'][topkey].append({key: val})
return ret
def _get_tags(data):
"""
Retrieve all the tags for this distro from the yaml
"""
ret = {}
distro = __grains__.get('osfinger')
for toplist, toplevel in data.get('grep', {}).items():
# grep:blacklist
for audit_dict in toplevel:
# grep:blacklist:0
for audit_id, audit_data in audit_dict.items():
# grep:blacklist:0:telnet
tags_dict = audit_data.get('data', {})
# grep:blacklist:0:telnet:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# grep:blacklist:0:telnet:data:Debian-8
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.items():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.items():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'grep',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _grep(path,
pattern,
*args):
"""
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
Globbing is supported (i.e. ``/var/log/foo/*.log``), but if globbing
is being used then the path should be quoted to keep the shell from
attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
args
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
"""
path = os.path.expanduser(path)
if args:
options = ' '.join(args)
else:
options = ''
cmd = (
r'''grep {options} {pattern} {path}'''
.format(
options=options,
pattern=pattern,
path=path,
)
)
try:
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror)
return ret
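# Standalone sketch (an addition, illustrative only, no salt required) of the
# decision table described in the module docstring: how `found`, the audit
# type, and `match_on_file_missing` combine into Success/Failure. The helper
# names `decide` and `found_for_missing_file` are assumptions of this sketch,
# not part of the module's API.
def decide(audittype, found):
    # Blacklisted patterns must NOT be found; whitelisted patterns MUST be.
    if audittype == 'blacklist':
        return 'Failure' if found else 'Success'
    return 'Success' if found else 'Failure'

def found_for_missing_file(match_on_file_missing):
    # Mirrors the docstring: True means a missing file is treated as a match,
    # False means it is treated as a non-match.
    return bool(match_on_file_missing)

if __name__ == '__main__':
    assert decide('blacklist', found=True) == 'Failure'
    assert decide('whitelist', found=True) == 'Success'
    # Missing file with match_on_file_missing=True: success for whitelist,
    # failure for blacklist.
    assert decide('whitelist', found_for_missing_file(True)) == 'Success'
    assert decide('blacklist', found_for_missing_file(True)) == 'Failure'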
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from pants.backend.docgen.targets.doc import Page
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.python.targets.python_app import PythonApp
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.build_environment import get_buildroot
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.build_graph.remote_sources import RemoteSources
from pants.engine.build_files import create_graph_rules
from pants.engine.fs import create_fs_rules
from pants.engine.isolated_process import create_process_rules
from pants.engine.legacy.address_mapper import LegacyAddressMapper
from pants.engine.legacy.graph import (LegacyBuildGraph, TransitiveHydratedTargets,
create_legacy_graph_tasks)
from pants.engine.legacy.options_parsing import create_options_parsing_rules
from pants.engine.legacy.parser import LegacyPythonCallbacksParser
from pants.engine.legacy.structs import (AppAdaptor, JvmBinaryAdaptor, PageAdaptor,
PantsPluginAdaptor, PythonBinaryAdaptor,
PythonTargetAdaptor, PythonTestsAdaptor,
RemoteSourcesAdaptor, TargetAdaptor)
from pants.engine.mapper import AddressMapper
from pants.engine.native import Native
from pants.engine.parser import SymbolTable
from pants.engine.rules import SingletonRule
from pants.engine.scheduler import Scheduler
from pants.init.options_initializer import BuildConfigInitializer
from pants.option.global_options import (DEFAULT_EXECUTION_OPTIONS, ExecutionOptions,
GlobMatchErrorBehavior)
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class LegacySymbolTable(SymbolTable):
"""A v1 SymbolTable facade for use with the v2 engine."""
def __init__(self, build_file_aliases):
"""
:param build_file_aliases: BuildFileAliases to register.
:type build_file_aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
"""
self._build_file_aliases = build_file_aliases
self._table = {
alias: self._make_target_adaptor(TargetAdaptor, target_type)
for alias, target_type in build_file_aliases.target_types.items()
}
for alias, factory in build_file_aliases.target_macro_factories.items():
# TargetMacro.Factory with more than one target type is deprecated.
# For default sources, this means that TargetMacro Factories with more than one target_type
# will not parse sources through the engine, and will fall back to the legacy python sources
# parsing.
# Conveniently, multi-target_type TargetMacro.Factory, and legacy python source parsing, are
# targeted to be removed in the same version of pants.
if len(factory.target_types) == 1:
self._table[alias] = self._make_target_adaptor(
TargetAdaptor,
tuple(factory.target_types)[0],
)
# TODO: The alias replacement here is to avoid elevating "TargetAdaptors" into the public
# API until after https://github.com/pantsbuild/pants/issues/3560 has been completed.
# These should likely move onto Target subclasses as the engine gets deeper into beta
# territory.
self._table['python_library'] = self._make_target_adaptor(PythonTargetAdaptor, PythonLibrary)
self._table['jvm_app'] = self._make_target_adaptor(AppAdaptor, JvmApp)
self._table['jvm_binary'] = self._make_target_adaptor(JvmBinaryAdaptor, JvmBinary)
self._table['python_app'] = self._make_target_adaptor(AppAdaptor, PythonApp)
self._table['python_tests'] = self._make_target_adaptor(PythonTestsAdaptor, PythonTests)
self._table['python_binary'] = self._make_target_adaptor(PythonBinaryAdaptor, PythonBinary)
self._table['remote_sources'] = self._make_target_adaptor(RemoteSourcesAdaptor, RemoteSources)
self._table['page'] = self._make_target_adaptor(PageAdaptor, Page)
# Note that these don't call _make_target_adaptor because we don't have a handy reference to the
# types being constructed. They don't have any default_sources behavior, so this should be ok,
# but if we end up doing more things in _make_target_adaptor, we should make sure they're
# applied here too.
self._table['pants_plugin'] = PantsPluginAdaptor
self._table['contrib_plugin'] = PantsPluginAdaptor
def aliases(self):
return self._build_file_aliases
def table(self):
return self._table
@classmethod
def _make_target_adaptor(cls, base_class, target_type):
"""
Look up the default source globs for the type, and apply them to parsing through the engine.
"""
if not target_type.supports_default_sources() or target_type.default_sources_globs is None:
return base_class
globs = _tuplify(target_type.default_sources_globs)
excludes = _tuplify(target_type.default_sources_exclude_globs)
class GlobsHandlingTargetAdaptor(base_class):
@property
def default_sources_globs(self):
if globs is None:
return super(GlobsHandlingTargetAdaptor, self).default_sources_globs
else:
return globs
@property
def default_sources_exclude_globs(self):
if excludes is None:
return super(GlobsHandlingTargetAdaptor, self).default_sources_exclude_globs
else:
return excludes
return GlobsHandlingTargetAdaptor
def _tuplify(v):
if v is None:
return None
if isinstance(v, tuple):
return v
if isinstance(v, (list, set)):
return tuple(v)
return (v,)
class LegacyGraphScheduler(datatype(['scheduler', 'symbol_table'])):
"""A thin wrapper around a Scheduler configured with @rules for a symbol table."""
def new_session(self):
session = self.scheduler.new_session()
return LegacyGraphSession(session, self.symbol_table)
class LegacyGraphSession(datatype(['scheduler_session', 'symbol_table'])):
"""A thin wrapper around a SchedulerSession configured with @rules for a symbol table."""
def warm_product_graph(self, target_roots):
"""Warm the scheduler's `ProductGraph` with `TransitiveHydratedTargets` products.
:param TargetRoots target_roots: The targets root of the request.
"""
logger.debug('warming target_roots for: %r', target_roots)
subjects = target_roots.specs
if not subjects:
subjects = []
request = self.scheduler_session.execution_request([TransitiveHydratedTargets], subjects)
result = self.scheduler_session.execute(request)
if result.error:
raise result.error
def create_build_graph(self, target_roots, build_root=None):
"""Construct and return a `BuildGraph` given a set of input specs.
:param TargetRoots target_roots: The targets root of the request.
:param string build_root: The build root.
:returns: A tuple of (BuildGraph, AddressMapper).
"""
logger.debug('target_roots are: %r', target_roots)
graph = LegacyBuildGraph.create(self.scheduler_session, self.symbol_table)
logger.debug('build_graph is: %s', graph)
# Ensure the entire generator is unrolled.
for _ in graph.inject_roots_closure(target_roots):
pass
address_mapper = LegacyAddressMapper(self.scheduler_session, build_root or get_buildroot())
logger.debug('address_mapper is: %s', address_mapper)
return graph, address_mapper
class EngineInitializer(object):
"""Constructs the components necessary to run the v2 engine with v1 BuildGraph compatibility."""
@staticmethod
def setup_legacy_graph(native, bootstrap_options, build_configuration):
"""Construct and return the components necessary for LegacyBuildGraph construction."""
return EngineInitializer.setup_legacy_graph_extended(
bootstrap_options.pants_ignore,
bootstrap_options.pants_workdir,
bootstrap_options.build_file_imports,
build_configuration,
native=native,
glob_match_error_behavior=bootstrap_options.glob_expansion_failure,
rules=build_configuration.rules(),
build_ignore_patterns=bootstrap_options.build_ignore,
exclude_target_regexps=bootstrap_options.exclude_target_regexp,
subproject_roots=bootstrap_options.subproject_roots,
include_trace_on_error=bootstrap_options.print_exception_stacktrace,
execution_options=ExecutionOptions.from_bootstrap_options(bootstrap_options),
)
@staticmethod
def setup_legacy_graph_extended(
pants_ignore_patterns,
workdir,
build_file_imports_behavior,
build_configuration,
build_root=None,
native=None,
glob_match_error_behavior=None,
rules=None,
build_ignore_patterns=None,
exclude_target_regexps=None,
subproject_roots=None,
include_trace_on_error=True,
execution_options=None,
):
"""Construct and return the components necessary for LegacyBuildGraph construction.
:param list pants_ignore_patterns: A list of path ignore patterns for FileSystemProjectTree,
usually taken from the '--pants-ignore' global option.
:param str workdir: The pants workdir.
:param build_file_imports_behavior: How to behave if a BUILD file being parsed tries to use
import statements. Valid values: "allow", "warn", "error".
:type build_file_imports_behavior: string
:param str build_root: A path to be used as the build root. If None, then default is used.
:param Native native: An instance of the native-engine subsystem.
:param build_configuration: The `BuildConfiguration` object to get build file aliases from.
:type build_configuration: :class:`pants.build_graph.build_configuration.BuildConfiguration`
:param glob_match_error_behavior: How to behave if a glob specified for a target's sources or
bundles does not expand to anything.
:type glob_match_error_behavior: :class:`pants.option.global_options.GlobMatchErrorBehavior`
:param list build_ignore_patterns: A list of paths ignore patterns used when searching for BUILD
files, usually taken from the '--build-ignore' global option.
:param list exclude_target_regexps: A list of regular expressions for excluding targets.
:param list subproject_roots: Paths that correspond with embedded build roots
under the current build root.
:param bool include_trace_on_error: If True, when an error occurs, the error message will
include the graph trace.
:param execution_options: Option values for (remote) process execution.
:type execution_options: :class:`pants.option.global_options.ExecutionOptions`
:returns: A LegacyGraphScheduler.
"""
build_root = build_root or get_buildroot()
build_configuration = build_configuration or BuildConfigInitializer.get(OptionsBootstrapper())
build_file_aliases = build_configuration.registered_aliases()
rules = rules or build_configuration.rules() or []
symbol_table = LegacySymbolTable(build_file_aliases)
project_tree = FileSystemProjectTree(build_root, pants_ignore_patterns)
execution_options = execution_options or DEFAULT_EXECUTION_OPTIONS
# Register "literal" subjects required for these rules.
parser = LegacyPythonCallbacksParser(
symbol_table,
build_file_aliases,
build_file_imports_behavior
)
address_mapper = AddressMapper(parser=parser,
build_ignore_patterns=build_ignore_patterns,
exclude_target_regexps=exclude_target_regexps,
subproject_roots=subproject_roots)
# Load the native backend.
native = native or Native.create()
# Create a Scheduler containing graph and filesystem rules, with no installed goals. The
# LegacyBuildGraph will explicitly request the products it needs.
rules = (
[
SingletonRule.from_instance(GlobMatchErrorBehavior.create(glob_match_error_behavior)),
SingletonRule.from_instance(build_configuration),
] +
create_legacy_graph_tasks(symbol_table) +
create_fs_rules() +
create_process_rules() +
create_graph_rules(address_mapper, symbol_table) +
create_options_parsing_rules() +
rules
)
scheduler = Scheduler(
native,
project_tree,
workdir,
rules,
execution_options,
include_trace_on_error=include_trace_on_error,
)
return LegacyGraphScheduler(scheduler, symbol_table)
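# Illustrative aside (an addition, not part of pants): the `_tuplify` helper
# above normalizes default source globs before they are baked into the
# GlobsHandlingTargetAdaptor closures. Restated as a standalone copy so the
# example runs without the pants imports; behavior matches `_tuplify`.
if __name__ == '__main__':
    def _tuplify_demo(v):
        if v is None:
            return None
        if isinstance(v, tuple):
            return v
        if isinstance(v, (list, set)):
            return tuple(v)
        return (v,)

    assert _tuplify_demo(None) is None
    assert _tuplify_demo(('*.py',)) == ('*.py',)
    assert _tuplify_demo(['*.py', '*.pyi']) == ('*.py', '*.pyi')
    assert _tuplify_demo('*.java') == ('*.java',)  # bare value gets wrapped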
|
# -*- coding: UTF-8 -*-
"""
This module provides the abstract base classes and core concepts
for the model elements in behave.
"""
import os.path
import sys
import six
from behave.capture import Captured
from behave.textutil import text as _text
from enum import Enum
if six.PY2:
# -- USE: Python3 backport for better unicode compatibility.
import traceback2 as traceback
else:
import traceback
PLATFORM_WIN = sys.platform.startswith("win")
def posixpath_normalize(path):
return path.replace("\\", "/")
# -----------------------------------------------------------------------------
# GENERIC MODEL CLASSES:
# -----------------------------------------------------------------------------
class Status(Enum):
"""Provides the (test-run) status of a model element.
Features and Scenarios use: untested, skipped, passed, failed.
Steps may use all enum-values.
Enum values:
* untested (initial state):
Defines the initial state before a test-run.
Sometimes used to indicate that the model element was not executed
during a test run.
* skipped:
A model element is skipped because it should not run.
This is caused by filtering mechanisms, like tags, active-tags,
file-location arg, select-by-name, etc.
* passed: A model element was executed and passed (without failures).
* failed: Failures occurred while executing it.
* undefined: Used for undefined-steps (no step implementation was found).
* executing: Marks the steps during execution (used in a formatter)
.. versionadded:: 1.2.6
Supersedes string-based status values.
"""
untested = 0
skipped = 1
passed = 2
failed = 3
undefined = 4
executing = 5
def __eq__(self, other):
"""Comparison operator equals-to other value.
Supports other enum-values and string (for backward compatibility).
EXAMPLES::
status = Status.passed
assert status == Status.passed
assert status == "passed"
assert status != "failed"
:param other: Other value to compare (enum-value, string).
:return: True, if both values are equal. False, otherwise.
"""
if isinstance(other, six.string_types):
# -- CONVENIENCE: Compare with string-name (backward-compatible)
return self.name == other
return super(Status, self).__eq__(other)
@classmethod
def from_name(cls, name):
"""Select enumeration value by using its name.
:param name: Name as key to the enum value (as string).
:return: Enum value (instance)
:raises: LookupError, if status name is unknown.
"""
# pylint: disable=no-member
enum_value = cls.__members__.get(name, None)
if enum_value is None:
known_names = ", ".join(cls.__members__.keys())
raise LookupError("%s (expected: %s)" % (name, known_names))
return enum_value
class Argument(object):
"""An argument found in a *feature file* step name and extracted using
step decorator `parameters`_.
The attributes are:
.. attribute:: original
The actual text matched in the step name.
.. attribute:: value
The potentially type-converted value of the argument.
.. attribute:: name
The name of the argument. This will be None if the parameter is
anonymous.
.. attribute:: start
The start index in the step name of the argument. Used for display.
.. attribute:: end
The end index in the step name of the argument. Used for display.
"""
def __init__(self, start, end, original, value, name=None):
self.start = start
self.end = end
self.original = original
self.value = value
self.name = name
# @total_ordering
# class FileLocation(unicode):
class FileLocation(object):
"""
Provides a value object for file location objects.
A file location consists of:
* filename
* line (number), optional
LOCATION SCHEMA:
* "{filename}:{line}" or
* "{filename}" (if line number is not present)
"""
__pychecker__ = "missingattrs=line" # -- Ignore warnings for 'line'.
def __init__(self, filename, line=None):
if PLATFORM_WIN:
filename = posixpath_normalize(filename)
self.filename = filename
self.line = line
def get(self):
return self.filename
def abspath(self):
return os.path.abspath(self.filename)
def basename(self):
return os.path.basename(self.filename)
def dirname(self):
return os.path.dirname(self.filename)
def relpath(self, start=os.curdir):
"""Compute relative path for start to filename.
:param start: Base path or start directory (default=current dir).
:return: Relative path from start to filename
"""
return os.path.relpath(self.filename, start)
def exists(self):
return os.path.exists(self.filename)
def _line_lessthan(self, other_line):
if self.line is None:
# return not (other_line is None)
return other_line is not None
elif other_line is None:
return False
else:
return self.line < other_line
def __eq__(self, other):
if isinstance(other, FileLocation):
return self.filename == other.filename and self.line == other.line
elif isinstance(other, six.string_types):
return self.filename == other
else:
raise TypeError("Cannot compare FileLocation with %s:%s" % \
(type(other), other))
def __ne__(self, other):
# return not self == other # pylint: disable=unneeded-not
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, FileLocation):
if self.filename < other.filename:
return True
elif self.filename > other.filename:
return False
else:
assert self.filename == other.filename
return self._line_lessthan(other.line)
elif isinstance(other, six.string_types):
return self.filename < other
else:
raise TypeError("Cannot compare FileLocation with %s:%s" % \
(type(other), other))
def __le__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
# return not other < self # pylint unneeded-not
return other >= self
def __gt__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
if isinstance(other, FileLocation):
return other < self
else:
return self.filename > other
def __ge__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
# return not self < other
return not self.__lt__(other)
def __repr__(self):
return u'<FileLocation: filename="%s", line=%s>' % \
(self.filename, self.line)
def __str__(self):
filename = self.filename
if isinstance(filename, six.binary_type):
filename = _text(filename, "utf-8")
if self.line is None:
return filename
return u"%s:%d" % (filename, self.line)
if six.PY2:
__unicode__ = __str__
__str__ = lambda self: self.__unicode__().encode("utf-8")
@classmethod
def for_function(cls, func, curdir=None):
"""Extracts the location information from the function and builds
the location string (schema: "{source_filename}:{line_number}").
:param func: Function whose location should be determined.
:return: FileLocation object
"""
func = unwrap_function(func)
function_code = six.get_function_code(func)
filename = function_code.co_filename
line_number = function_code.co_firstlineno
curdir = curdir or os.getcwd()
try:
filename = os.path.relpath(filename, curdir)
except ValueError:
# WINDOWS-SPECIFIC (#599):
# If a step-function comes from a different disk drive,
# a relative path will fail: Keep the absolute path.
pass
return cls(filename, line_number)
# -----------------------------------------------------------------------------
# ABSTRACT MODEL CLASSES (and concepts):
# -----------------------------------------------------------------------------
class BasicStatement(object):
def __init__(self, filename, line, keyword, name):
filename = filename or '<string>'
filename = os.path.relpath(filename, os.getcwd()) # -- NEEDS: abspath?
self.location = FileLocation(filename, line)
assert isinstance(keyword, six.text_type)
assert isinstance(name, six.text_type)
self.keyword = keyword
self.name = name
# -- SINCE: 1.2.6
self.captured = Captured()
# -- ERROR CONTEXT INFO:
self.exception = None
self.exc_traceback = None
self.error_message = None
@property
def filename(self):
# return os.path.abspath(self.location.filename)
return self.location.filename
@property
def line(self):
return self.location.line
def reset(self):
# -- RESET: Captured output data
self.captured.reset()
# -- RESET: ERROR CONTEXT INFO
self.exception = None
self.exc_traceback = None
self.error_message = None
def send_status(self):
"""Emit the volatile attributes of this model in a primitive dict
"""
ret = {'exception': self.exception,
'error_message': self.error_message,
'exc_traceback': self.exc_traceback,
'captured': self.captured.send_status()
}
return ret
def recv_status(self, value):
"""Set volatile attributes from a `send_status()` primitive value
"""
for key in 'exception', 'error_message', 'exc_traceback':
if key in value:
setattr(self, key, value[key])
if 'captured' in value:
self.captured.recv_status(value['captured'])
def store_exception_context(self, exception):
self.exception = exception
self.exc_traceback = traceback.format_tb(sys.exc_info()[2])
def __hash__(self):
# -- NEEDED-FOR: PYTHON3
# return id((self.keyword, self.name))
return id(self)
def __eq__(self, other):
# -- PYTHON3 SUPPORT, ORDERABLE:
# NOTE: Ignore potential FileLocation differences.
return (self.keyword, self.name) == (other.keyword, other.name)
def __lt__(self, other):
# -- PYTHON3 SUPPORT, ORDERABLE:
# NOTE: Ignore potential FileLocation differences.
return (self.keyword, self.name) < (other.keyword, other.name)
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
# return not other < self
return other >= self
def __gt__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
assert isinstance(other, BasicStatement)
return other < self
def __ge__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
# OR: return self >= other
return not self < other # pylint: disable=unneeded-not
# def __cmp__(self, other):
# # -- NOTE: Ignore potential FileLocation differences.
# return cmp((self.keyword, self.name), (other.keyword, other.name))
class TagStatement(BasicStatement):
def __init__(self, filename, line, keyword, name, tags):
if tags is None:
tags = []
super(TagStatement, self).__init__(filename, line, keyword, name)
self.tags = tags
def should_run_with_tags(self, tag_expression):
"""Determines if statement should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if examples should run. False, otherwise (skip it).
"""
return tag_expression.check(self.tags)
class TagAndStatusStatement(BasicStatement):
# final_status = ('passed', 'failed', 'skipped')
final_status = (Status.passed, Status.failed, Status.skipped)
def __init__(self, filename, line, keyword, name, tags):
super(TagAndStatusStatement, self).__init__(filename, line, keyword, name)
self.tags = tags
self.should_skip = False
self.skip_reason = None
self._cached_status = Status.untested
def should_run_with_tags(self, tag_expression):
"""Determines if statement should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if examples should run. False, otherwise (skip it).
"""
return tag_expression.check(self.tags)
@property
def status(self):
if self._cached_status not in self.final_status:
# -- RECOMPUTE: As long as final status is not reached.
self._cached_status = self.compute_status()
return self._cached_status
@property
def is_finished(self):
return self._cached_status in self.final_status
def set_status(self, value):
if isinstance(value, six.string_types):
value = Status.from_name(value)
self._cached_status = value
def clear_status(self):
self._cached_status = Status.untested
def reset(self):
self.should_skip = False
self.skip_reason = None
self.clear_status()
def compute_status(self):
raise NotImplementedError
def send_status(self):
ret = super(TagAndStatusStatement, self).send_status()
ret['status'] = self._cached_status
ret['should_skip'] = self.should_skip
ret['skip_reason'] = self.skip_reason
return ret
def recv_status(self, value):
assert self._cached_status == Status.untested
super(TagAndStatusStatement, self).recv_status(value)
if 'should_skip' in value:
self.should_skip = value['should_skip']
if 'skip_reason' in value:
self.skip_reason = value['skip_reason']
if 'status' in value:
self._cached_status = value['status']
class Replayable(object):
type = None
def replay(self, formatter):
getattr(formatter, self.type)(self)
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def unwrap_function(func, max_depth=10):
"""Unwraps a function that is wrapped with :func:`functools.partial()`"""
iteration = 0
wrapped = getattr(func, "__wrapped__", None)
while wrapped and iteration < max_depth:
func = wrapped
wrapped = getattr(func, "__wrapped__", None)
iteration += 1
return func
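# Standalone sketches (additions, not part of behave) illustrating two of the
# concepts documented above. They are restated locally so they run without
# behave or six installed; names ending in "_demo"/"Demo" are illustrative.
if __name__ == "__main__":
    import functools
    from enum import Enum

    # (1) unwrap_function: follow the __wrapped__ chain that functools.wraps
    # records, up to max_depth levels (same loop as unwrap_function above).
    def unwrap_function_demo(func, max_depth=10):
        iteration = 0
        wrapped = getattr(func, "__wrapped__", None)
        while wrapped and iteration < max_depth:
            func = wrapped
            wrapped = getattr(func, "__wrapped__", None)
            iteration += 1
        return func

    def step_impl():
        """The real step implementation."""

    @functools.wraps(step_impl)
    def decorated():
        return step_impl()

    assert unwrap_function_demo(decorated) is step_impl

    # (2) Status-style enum: equality against the member name keeps the old
    # string-based status values working, as the Status docstring describes.
    class StatusDemo(Enum):
        passed = 2
        failed = 3

        def __eq__(self, other):
            if isinstance(other, str):
                return self.name == other
            return super(StatusDemo, self).__eq__(other)
        __hash__ = Enum.__hash__  # restore hashing lost by defining __eq__

    assert StatusDemo.passed == "passed"
    assert StatusDemo.passed != "failed"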
|
"""Atakama sdk."""
from os import path
from setuptools import setup
def long_description():
"""Extract description from readme."""
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md")) as readme_f:
contents = readme_f.read()
return contents
setup(
name="atakama",
version="1.3.0",
description="Atakama sdk",
packages=["atakama"],
long_description=long_description(),
long_description_content_type="text/markdown",
setup_requires=["wheel"],
install_requires=["oscrypto", "certvalidator"],
entry_points={"console_scripts": "atakama-pkg=atakama.packager:main"},
)
|
primes = []
n = 10
arr = [1]*(n+1)
arr[0] = 0
arr[1] = 0
for i in range(2, n+1, 1):
if arr[i]:
primes.append(i)
for j in range(2*i, n+1, i):
arr[j] = 0
# Regression
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
plt.title("Prime Distribution")
plt.scatter(np.arange(len(primes)), primes)
plt.ylabel("n-th prime")
plt.xlabel("n")
# regression
learning_rate = 0.001
epoch = 4000
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
deg = 4
def model(X_, w):
polynoms = []
for i in range(deg):
polynom = tf.multiply(w[i], tf.pow(X_, i))
polynoms.append(polynom)
return tf.add_n(polynoms)
w = tf.Variable([0.]*deg, name="parameters")
y_model = model(X, w)
cost = (tf.pow(Y-y_model, 2))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epc in range(epoch):
for (x, y) in zip(np.arange(len(primes)), primes):
sess.run(train_op, feed_dict={X: x, Y: y})
w_val = sess.run(w)
print(w_val)
sess.close()
poly_X = np.arange(len(primes))
poly_Y = []
for x_ in poly_X:
poly_y = 0
for i in range(deg):
poly_y += w_val[i] * pow(x_, i)
poly_Y.append(poly_y)
plt.plot(poly_X, poly_Y, "r")
plt.show()
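# Optional cross-check (an addition, not in the original script): the same
# polynomial can be fit in closed form with numpy's least-squares polyfit.
# Note that deg=4 above means powers 0..3, i.e. a degree-3 polynomial, and
# np.polyfit returns coefficients highest-power-first (the reverse of w_val).
poly_w = np.polyfit(np.arange(len(primes)), np.array(primes, dtype=float), deg - 1)
print("closed-form coefficients (highest power first):", poly_w)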
|
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import lightgbm as lgb
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
import matplotlib as mpl
from sklearn import preprocessing as pp
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import confusion_matrix, classification_report
get_ipython().run_line_magic('matplotlib', 'inline')
current_path = os.getcwd()
file = '/CHANGE ME TO CORRECT PATH/features.csv'
data = pd.read_csv(current_path + file)
dataX = data.copy().drop(['Class'],axis=1)
dataY = data['Class'].copy()
featuresToScale = dataX.drop(['Time'],axis=1).columns
sX = pp.StandardScaler(copy=True)
dataX.loc[:,featuresToScale] = sX.fit_transform(dataX[featuresToScale])
scalingFactors = pd.DataFrame(data=[sX.mean_,sX.scale_],index=['Mean','StDev'],columns=featuresToScale)
X_train, X_test, y_train, y_test = train_test_split(dataX,
dataY, test_size=0.10,
random_state=2019, stratify=dataY)
# Training set: 10-fold cross-validation
k_fold = StratifiedKFold(n_splits=10,shuffle=True,random_state=2018)
penalty = 'l2'
C = 1.0
class_weight = 'balanced'
random_state = 2018
solver = 'liblinear'
n_jobs = 1
logReg = LogisticRegression(penalty=penalty, C=C,
class_weight=class_weight, random_state=random_state,
solver=solver, n_jobs=n_jobs)
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=[0,1])
model = logReg
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
model.fit(X_train_fold, y_train_fold)
loglossTraining = log_loss(y_train_fold,
model.predict_proba(X_train_fold)[:,1])
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,:] = model.predict_proba(X_cv_fold)
loglossCV = log_loss(y_cv_fold,
predictionsBasedOnKFolds.loc[X_cv_fold.index,1])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,1]], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsLogisticRegression = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],
preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],
preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
n_estimators = 10
max_features = 'auto'
max_depth = None
min_samples_split = 2
min_samples_leaf = 1
min_weight_fraction_leaf = 0.0
max_leaf_nodes = None
bootstrap = True
oob_score = False
n_jobs = -1
random_state = 2018
class_weight = 'balanced'
RFC = RandomForestClassifier(n_estimators=n_estimators,
max_features=max_features, max_depth=max_depth,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_leaf_nodes=max_leaf_nodes, bootstrap=bootstrap,
oob_score=oob_score, n_jobs=n_jobs, random_state=random_state,
class_weight=class_weight)
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=[0,1])
model = RFC
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
model.fit(X_train_fold, y_train_fold)
loglossTraining = log_loss(y_train_fold, model.predict_proba(X_train_fold)[:,1])
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,:] = model.predict_proba(X_cv_fold)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,1])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,1]], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsRandomForests = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],
preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],
preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(
areaUnderROC))
plt.legend(loc="lower right")
plt.show()
params_xGB = {
'nthread':16,
'learning_rate': 0.3,
'gamma': 0,
'max_depth': 6,
'min_child_weight': 1,
'max_delta_step': 0,
'subsample': 1.0,
'colsample_bytree': 1.0,
'objective':'binary:logistic',
'num_class':1,
'eval_metric':'logloss',
'seed':2018,
'silent':1
}
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=['prediction'])
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
dtrain = xgb.DMatrix(data=X_train_fold, label=y_train_fold)
dCV = xgb.DMatrix(data=X_cv_fold)
bst = xgb.cv(params_xGB, dtrain, num_boost_round=2000,
nfold=5, early_stopping_rounds=200, verbose_eval=50)
best_rounds = np.argmin(bst['test-logloss-mean'])
bst = xgb.train(params_xGB, dtrain, best_rounds)
loglossTraining = log_loss(y_train_fold, bst.predict(dtrain))
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'] = bst.predict(dCV)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,'prediction']], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsXGBoostGradientBoosting = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
params_lightGB = {
'task': 'train',
'application':'binary',
'num_class':1,
'boosting': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'metric_freq':50,
'is_training_metric':False,
'max_depth':4,
'num_leaves': 31,
'learning_rate': 0.01,
'feature_fraction': 1.0,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'bagging_seed': 2018,
'verbose': 0,
'num_threads':16
}
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=['prediction'])
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
lgb_train = lgb.Dataset(X_train_fold, y_train_fold)
lgb_eval = lgb.Dataset(X_cv_fold, y_cv_fold, reference=lgb_train)
gbm = lgb.train(params_lightGB, lgb_train, num_boost_round=2000,
valid_sets=lgb_eval, early_stopping_rounds=200)
loglossTraining = log_loss(y_train_fold, gbm.predict(X_train_fold, num_iteration=gbm.best_iteration))
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'] = gbm.predict(X_cv_fold, num_iteration=gbm.best_iteration)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,'prediction']], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsLightGBMGradientBoosting = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
# Test set
predictionsTestSetLogisticRegression = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetLogisticRegression.loc[:,'prediction'] = logReg.predict_proba(X_test)[:,1]
logLossTestSetLogisticRegression = log_loss(y_test, predictionsTestSetLogisticRegression)
predictionsTestSetRandomForests = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetRandomForests.loc[:,'prediction'] = RFC.predict_proba(X_test)[:,1]
logLossTestSetRandomForests = log_loss(y_test, predictionsTestSetRandomForests)
predictionsTestSetXGBoostGradientBoosting = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
dtest = xgb.DMatrix(data=X_test)
predictionsTestSetXGBoostGradientBoosting.loc[:,'prediction'] = bst.predict(dtest)
logLossTestSetXGBoostGradientBoosting = log_loss(y_test, predictionsTestSetXGBoostGradientBoosting)
predictionsTestSetLightGBMGradientBoosting = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetLightGBMGradientBoosting.loc[:,'prediction'] = gbm.predict(X_test, num_iteration=gbm.best_iteration)
logLossTestSetLightGBMGradientBoosting = log_loss(y_test, predictionsTestSetLightGBMGradientBoosting)
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetLogisticRegression)
average_precision = average_precision_score(y_test,predictionsTestSetLogisticRegression)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetLogisticRegression)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetRandomForests)
average_precision = average_precision_score(y_test,predictionsTestSetRandomForests)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetRandomForests)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetXGBoostGradientBoosting)
average_precision = average_precision_score(y_test,predictionsTestSetXGBoostGradientBoosting)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetXGBoostGradientBoosting)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetLightGBMGradientBoosting)
average_precision = average_precision_score(y_test,predictionsTestSetLightGBMGradientBoosting)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetLightGBMGradientBoosting)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
# Ensemble of the previous algorithms
predictionsBasedOnKFoldsFourModels = pd.DataFrame(data=[],index=y_train.index)
predictionsBasedOnKFoldsFourModels = predictionsBasedOnKFoldsFourModels.join(
predictionsBasedOnKFoldsLogisticRegression['prediction'].astype(float), \
how='left').join(predictionsBasedOnKFoldsRandomForests['prediction'] \
.astype(float),how='left',rsuffix="2").join( \
predictionsBasedOnKFoldsXGBoostGradientBoosting['prediction'].astype(float), \
how='left',rsuffix="3").join( \
predictionsBasedOnKFoldsLightGBMGradientBoosting['prediction'].astype(float), \
how='left',rsuffix="4")
predictionsBasedOnKFoldsFourModels.columns = ['predsLR','predsRF','predsXGB','predsLightGBM']
X_trainWithPredictions = X_train.merge(predictionsBasedOnKFoldsFourModels,
left_index=True,right_index=True)
params_lightGB = {
'task': 'train',
'application':'binary',
'num_class':1,
'boosting': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'metric_freq':50,
'is_training_metric':False,
'max_depth':4,
'num_leaves': 31,
'learning_rate': 0.01,
'feature_fraction': 1.0,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'bagging_seed': 2018,
'verbose': 0,
'num_threads':16
}
trainingScores = []
cvScores = []
predictionsBasedOnKFoldsEnsemble = pd.DataFrame(data=[],index=y_train.index,columns=['prediction'])
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)), y_train.ravel()):
X_train_fold, X_cv_fold = X_trainWithPredictions.iloc[train_index,:], X_trainWithPredictions.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
lgb_train = lgb.Dataset(X_train_fold, y_train_fold)
lgb_eval = lgb.Dataset(X_cv_fold, y_cv_fold, reference=lgb_train)
gbm = lgb.train(params_lightGB, lgb_train, num_boost_round=2000,
valid_sets=lgb_eval, early_stopping_rounds=200)
loglossTraining = log_loss(y_train_fold, gbm.predict(X_train_fold, num_iteration=gbm.best_iteration))
trainingScores.append(loglossTraining)
predictionsBasedOnKFoldsEnsemble.loc[X_cv_fold.index,'prediction'] = gbm.predict(X_cv_fold, num_iteration=gbm.best_iteration)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFoldsEnsemble.loc[X_cv_fold.index,'prediction'])
cvScores.append(loglossCV)
predictions = pd.concat([y_train,predictionsBasedOnKFoldsEnsemble.loc[:,'prediction']], axis=1)
predictions.columns = ['trueLabel','prediction']
precision, recall, thresholds = precision_recall_curve(predictions['trueLabel'],predictions['prediction'])
average_precision = average_precision_score(predictions['trueLabel'],predictions['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(predictions['trueLabel'],predictions['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
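# Aside (an addition, not part of the original notebook): the hand-rolled
# stacking above (out-of-fold predictions from several base models fed into a
# final LightGBM) is what scikit-learn packages as StackingClassifier,
# available in scikit-learn >= 0.22. A minimal sketch on synthetic data:
from sklearn.datasets import make_classification
from sklearn.ensemble import StackingClassifier

X_demo, y_demo = make_classification(n_samples=1000, weights=[0.95, 0.05],
                                     random_state=2018)
stack = StackingClassifier(
    estimators=[('lr', LogisticRegression(max_iter=1000)),
                ('rf', RandomForestClassifier(n_estimators=10,
                                              random_state=2018))],
    final_estimator=LogisticRegression(max_iter=1000),
    cv=5)  # final estimator is trained on out-of-fold predictions, as above
stack.fit(X_demo, y_demo)
print('stacked train AUC:', roc_auc_score(y_demo, stack.predict_proba(X_demo)[:, 1]))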
|
# pylint: disable=no-self-use,invalid-name
import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertModel
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers.wordpiece_indexer import PretrainedBertIndexer
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import BertBasicWordSplitter
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.bert_token_embedder import BertEmbedder
class TestBertEmbedder(ModelTestCase):
def setUp(self):
super().setUp()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
self.token_indexer = PretrainedBertIndexer(str(vocab_path))
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
self.bert_model = BertModel(config)
self.token_embedder = BertEmbedder(self.bert_model)
def test_without_offsets(self):
input_ids = torch.LongTensor([[3, 5, 9, 1, 2], [1, 5, 0, 0, 0]])
result = self.token_embedder(input_ids)
assert list(result.shape) == [2, 5, 12]
def test_with_offsets(self):
input_ids = torch.LongTensor([[3, 5, 9, 1, 2], [1, 5, 0, 0, 0]])
offsets = torch.LongTensor([[0, 2, 4], [1, 0, 0]])
result = self.token_embedder(input_ids, offsets=offsets)
assert list(result.shape) == [2, 3, 12]
def test_end_to_end(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 4 3 5 6 8 9 2 14 12
sentence1 = "the quickest quick brown fox jumped over the lazy dog"
tokens1 = tokenizer.tokenize(sentence1)
# 2 3 5 6 8 9 2 15 10 11 14 1
sentence2 = "the quick brown fox jumped over the laziest lazy elmo"
tokens2 = tokenizer.tokenize(sentence2)
vocab = Vocabulary()
instance1 = Instance({"tokens": TextField(tokens1, {"bert": self.token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": self.token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
# 16 = [CLS], 17 = [SEP]
assert tokens["bert"].tolist() == [[16, 2, 3, 4, 3, 5, 6, 8, 9, 2, 14, 12, 17, 0],
[16, 2, 3, 5, 6, 8, 9, 2, 15, 10, 11, 14, 1, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[1, 2, 3, 4, 5, 6, 7, 10, 11, 12]]
# No offsets, should get 14 vectors back ([CLS] + 12 token wordpieces + [SEP])
bert_vectors = self.token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 14, 12]
# Offsets, should get 10 vectors back.
bert_vectors = self.token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 10, 12]
# Now try top_layer_only = True
tlo_embedder = BertEmbedder(self.bert_model, top_layer_only=True)
bert_vectors = tlo_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 14, 12]
bert_vectors = tlo_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 10, 12]
def test_padding_for_equal_length_indices(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 5 6 8 9 2 14 12
sentence = "the quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
instance = Instance({"tokens": TextField(tokens, {"bert": self.token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
assert tokens["bert"].tolist() == [[16, 2, 3, 5, 6, 8, 9, 2, 14, 12, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
def test_squad_with_unwordpieceable_passage(self):
# pylint: disable=line-too-long
tokenizer = WordTokenizer()
token_indexer = PretrainedBertIndexer("bert-base-uncased")
passage1 = ("There were four major HDTV systems tested by SMPTE in the late 1970s, "
"and in 1979 an SMPTE study group released A Study of High Definition Television Systems:")
question1 = "Who released A Study of High Definition Television Systems?"
passage2 = ("Broca, being what today would be called a neurosurgeon, "
"had taken an interest in the pathology of speech. He wanted "
"to localize the difference between man and the other animals, "
"which appeared to reside in speech. He discovered the speech "
"center of the human brain, today called Broca's area after him. "
"His interest was mainly in Biological anthropology, but a German "
"philosopher specializing in psychology, Theodor Waitz, took up the "
"theme of general and social anthropology in his six-volume work, "
"entitled Die Anthropologie der Naturvölker, 1859–1864. The title was "
"""soon translated as "The Anthropology of Primitive Peoples". """
"The last two volumes were published posthumously.")
question2 = "What did Broca discover in the human brain?"
from allennlp.data.dataset_readers.reading_comprehension.util import make_reading_comprehension_instance
instance1 = make_reading_comprehension_instance(tokenizer.tokenize(question1),
tokenizer.tokenize(passage1),
{"bert": token_indexer},
passage1)
instance2 = make_reading_comprehension_instance(tokenizer.tokenize(question2),
tokenizer.tokenize(passage2),
{"bert": token_indexer},
passage2)
vocab = Vocabulary()
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
qtokens = tensor_dict["question"]
ptokens = tensor_dict["passage"]
config = BertConfig(len(token_indexer.vocab))
model = BertModel(config)
embedder = BertEmbedder(model)
_ = embedder(ptokens["bert"], offsets=ptokens["bert-offsets"])
_ = embedder(qtokens["bert"], offsets=qtokens["bert-offsets"])
def test_max_length(self):
config = BertConfig(len(self.token_indexer.vocab))
model = BertModel(config)
embedder = BertEmbedder(model)
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the " * 1000
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
instance = Instance({"tokens": TextField(tokens, {"bert": self.token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
embedder(tokens["bert"], tokens["bert-offsets"])
def test_end_to_end_with_higher_order_inputs(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
# 2 3 4 3 5 6 8 9 2 14 12
sentence1 = "the quickest quick brown fox jumped over the lazy dog"
tokens1 = tokenizer.tokenize(sentence1)
text_field1 = TextField(tokens1, {"bert": self.token_indexer})
# 2 3 5 6 8 9 2 15 10 11 14 1
sentence2 = "the quick brown fox jumped over the laziest lazy elmo"
tokens2 = tokenizer.tokenize(sentence2)
text_field2 = TextField(tokens2, {"bert": self.token_indexer})
# 2 5 15 10 11 6
sentence3 = "the brown laziest fox"
tokens3 = tokenizer.tokenize(sentence3)
text_field3 = TextField(tokens3, {"bert": self.token_indexer})
vocab = Vocabulary()
instance1 = Instance({"tokens": ListField([text_field1])})
instance2 = Instance({"tokens": ListField([text_field2, text_field3])})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths, verbose=True)
tokens = tensor_dict["tokens"]
# No offsets, should get 14 vectors back ([CLS] + 12 wordpieces + [SEP])
bert_vectors = self.token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 2, 14, 12]
# Offsets, should get 10 vectors back.
bert_vectors = self.token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 2, 10, 12]
# Now try top_layer_only = True
tlo_embedder = BertEmbedder(self.bert_model, top_layer_only=True)
bert_vectors = tlo_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [2, 2, 14, 12]
bert_vectors = tlo_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [2, 2, 10, 12]
def test_sliding_window(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the quickest quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
token_indexer = PretrainedBertIndexer(str(vocab_path), truncate_long_sequences=False, max_pieces=8)
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
bert_model = BertModel(config)
token_embedder = BertEmbedder(bert_model, max_pieces=8)
instance = Instance({"tokens": TextField(tokens, {"bert": token_indexer})})
batch = Batch([instance])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
# 16 = [CLS], 17 = [SEP]
        # the 12 wordpieces are split into 3 overlapping windows (max_pieces=8), each wrapped with [CLS]/[SEP]
assert tokens["bert"].tolist() == [[16, 2, 3, 4, 3, 5, 6, 17,
16, 3, 5, 6, 8, 9, 2, 17,
16, 8, 9, 2, 14, 12, 17]]
assert tokens["bert-offsets"].tolist() == [[1, 3, 4, 5, 6, 7, 8, 9, 10, 11]]
bert_vectors = token_embedder(tokens["bert"])
assert list(bert_vectors.shape) == [1, 13, 12]
bert_vectors = token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert list(bert_vectors.shape) == [1, 10, 12]
def test_sliding_window_with_batch(self):
tokenizer = WordTokenizer(word_splitter=BertBasicWordSplitter())
sentence = "the quickest quick brown fox jumped over the lazy dog"
tokens = tokenizer.tokenize(sentence)
vocab = Vocabulary()
vocab_path = self.FIXTURES_ROOT / 'bert' / 'vocab.txt'
token_indexer = PretrainedBertIndexer(str(vocab_path), truncate_long_sequences=False, max_pieces=8)
config_path = self.FIXTURES_ROOT / 'bert' / 'config.json'
config = BertConfig(str(config_path))
bert_model = BertModel(config)
token_embedder = BertEmbedder(bert_model, max_pieces=8)
instance = Instance({"tokens": TextField(tokens, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens + tokens + tokens, {"bert": token_indexer})})
batch = Batch([instance, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
bert_vectors = token_embedder(tokens["bert"], offsets=tokens["bert-offsets"])
assert bert_vectors is not None
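# Illustrative sketch (not an AllenNLP API; the helper name is hypothetical): roughly
# what the `offsets` argument does inside BertEmbedder -- select one wordpiece vector
# per original token out of the full (batch, num_wordpieces, dim) sequence.
def _select_vectors_by_offsets(wordpiece_vectors: torch.Tensor, offsets: torch.LongTensor) -> torch.Tensor:
    """Gather one vector per original token; output shape is (batch, num_tokens, dim)."""
    index = offsets.unsqueeze(-1).expand(-1, -1, wordpiece_vectors.size(-1))
    return torch.gather(wordpiece_vectors, 1, index)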
|
"""
Set of wrapper functions for gym environments taken from
https://github.com/Shmuma/ptan/blob/master/ptan/common/wrappers.py
"""
import collections
import numpy as np
import torch
from pl_bolts.utils import _GYM_AVAILABLE, _OPENCV_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _GYM_AVAILABLE:
import gym.spaces
from gym import make as gym_make
from gym import ObservationWrapper, Wrapper
else: # pragma: no-cover
warn_missing_pkg('gym')
Wrapper = object
ObservationWrapper = object
if _OPENCV_AVAILABLE:
import cv2
else:
warn_missing_pkg('cv2', pypi_name='opencv-python') # pragma: no-cover
class ToTensor(Wrapper):
"""For environments where the user need to press FIRE for the game to start."""
def __init__(self, env=None):
super(ToTensor, self).__init__(env)
def step(self, action):
"""Take 1 step and cast to tensor"""
state, reward, done, info = self.env.step(action)
return torch.tensor(state), torch.tensor(reward), done, info
def reset(self):
"""reset the env and cast to tensor"""
return torch.tensor(self.env.reset())
class FireResetEnv(Wrapper):
"""For environments where the user need to press FIRE for the game to start."""
def __init__(self, env=None):
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def step(self, action):
"""Take 1 step"""
return self.env.step(action)
def reset(self):
"""reset the env"""
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class MaxAndSkipEnv(Wrapper):
"""Return only every `skip`-th frame"""
def __init__(self, env=None, skip=4):
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = collections.deque(maxlen=2)
self._skip = skip
def step(self, action):
"""take 1 step"""
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ProcessFrame84(ObservationWrapper):
"""preprocessing images from env"""
def __init__(self, env=None):
if not _OPENCV_AVAILABLE:
            raise ModuleNotFoundError('This class uses OpenCV, which is not installed yet.')
super(ProcessFrame84, self).__init__(env)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(84, 84, 1), dtype=np.uint8
)
def observation(self, obs):
"""preprocess the obs"""
return ProcessFrame84.process(obs)
@staticmethod
def process(frame):
"""image preprocessing, formats to 84x84"""
if frame.size == 210 * 160 * 3:
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
elif frame.size == 250 * 160 * 3:
img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ImageToPyTorch(ObservationWrapper):
"""converts image to pytorch format"""
def __init__(self, env):
if not _OPENCV_AVAILABLE:
            raise ModuleNotFoundError('This class uses OpenCV, which is not installed yet.')
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
new_shape = (old_shape[-1], old_shape[0], old_shape[1])
self.observation_space = gym.spaces.Box(
low=0.0, high=1.0, shape=new_shape, dtype=np.float32
)
@staticmethod
def observation(observation):
"""convert observation"""
return np.moveaxis(observation, 2, 0)
class ScaledFloatFrame(ObservationWrapper):
"""scales the pixels"""
@staticmethod
def observation(obs):
return np.array(obs).astype(np.float32) / 255.0
class BufferWrapper(ObservationWrapper):
""""Wrapper for image stacking"""
def __init__(self, env, n_steps, dtype=np.float32):
super(BufferWrapper, self).__init__(env)
self.dtype = dtype
self.buffer = None
old_space = env.observation_space
self.observation_space = gym.spaces.Box(
old_space.low.repeat(n_steps, axis=0),
old_space.high.repeat(n_steps, axis=0),
dtype=dtype,
)
def reset(self):
"""reset env"""
self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)
return self.observation(self.env.reset())
def observation(self, observation):
"""convert observation"""
self.buffer[:-1] = self.buffer[1:]
self.buffer[-1] = observation
return self.buffer
class DataAugmentation(ObservationWrapper):
"""
Carries out basic data augmentation on the env observations
- ToTensor
- GrayScale
- RandomCrop
"""
def __init__(self, env=None):
super().__init__(env)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(84, 84, 1), dtype=np.uint8
)
def observation(self, obs):
"""preprocess the obs"""
return ProcessFrame84.process(obs)
def make_environment(env_name):
"""Convert environment with wrappers"""
env = gym_make(env_name)
env = MaxAndSkipEnv(env)
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = ImageToPyTorch(env)
env = BufferWrapper(env, 4)
return ScaledFloatFrame(env)
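# Minimal usage sketch (assumes the classic gym API where reset() returns just the
# observation, and that an Atari env with a FIRE action is installed; the env name
# below is an assumption):
if __name__ == "__main__":  # pragma: no-cover
    _env = make_environment("PongNoFrameskip-v4")
    _obs = _env.reset()
    # stacked 4-frame, 84x84, float32 observation scaled to [0, 1]
    print(_obs.shape, _obs.dtype)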
|
from dnsimple2.resources import BaseResource
class UserResource(BaseResource):
fields = ('id', 'email', 'created_at', 'updated_at',)
def __init__(self, **kwargs):
self.id = kwargs.get('id')
self.email = kwargs.get('email')
self.created_at = self.parse_datetime(kwargs.get('created_at'))
self.updated_at = self.parse_datetime(kwargs.get('updated_at'))
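# Minimal construction sketch (values are made up; the ISO-8601 timestamp format is
# an assumption based on the DNSimple API's responses):
#
#   user = UserResource(id=1, email='user@example.com',
#                       created_at='2016-01-19T20:50:26Z',
#                       updated_at='2016-01-19T20:50:26Z')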
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
from launch import LaunchDescription
from launch.actions import OpaqueFunction, IncludeLaunchDescription, DeclareLaunchArgument
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import Node
def launch_setup(context, *args, **kwargs):
prefix = LaunchConfiguration('prefix', default='')
hw_ns = LaunchConfiguration('hw_ns', default='xarm')
limited = LaunchConfiguration('limited', default=False)
effort_control = LaunchConfiguration('effort_control', default=False)
velocity_control = LaunchConfiguration('velocity_control', default=False)
add_gripper = LaunchConfiguration('add_gripper', default=False)
add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)
dof = LaunchConfiguration('dof', default=7)
no_gui_ctrl = LaunchConfiguration('no_gui_ctrl', default=False)
add_other_geometry = LaunchConfiguration('add_other_geometry', default=False)
geometry_type = LaunchConfiguration('geometry_type', default='box')
geometry_mass = LaunchConfiguration('geometry_mass', default=0.1)
geometry_height = LaunchConfiguration('geometry_height', default=0.1)
geometry_radius = LaunchConfiguration('geometry_radius', default=0.1)
geometry_length = LaunchConfiguration('geometry_length', default=0.1)
geometry_width = LaunchConfiguration('geometry_width', default=0.1)
geometry_mesh_filename = LaunchConfiguration('geometry_mesh_filename', default='')
geometry_mesh_origin_xyz = LaunchConfiguration('geometry_mesh_origin_xyz', default='"0 0 0"')
geometry_mesh_origin_rpy = LaunchConfiguration('geometry_mesh_origin_rpy', default='"0 0 0"')
geometry_mesh_tcp_xyz = LaunchConfiguration('geometry_mesh_tcp_xyz', default='"0 0 0"')
geometry_mesh_tcp_rpy = LaunchConfiguration('geometry_mesh_tcp_rpy', default='"0 0 0"')
ros2_control_plugin = 'xarm_control/FakeXArmHW'
controllers_name = 'fake_controllers'
moveit_controller_manager_key = 'moveit_simple_controller_manager'
moveit_controller_manager_value = 'moveit_simple_controller_manager/MoveItSimpleControllerManager'
xarm_type = 'xarm{}'.format(dof.perform(context))
ros_namespace = LaunchConfiguration('ros_namespace', default='').perform(context)
# robot description launch
# xarm_description/launch/_xarm_robot_description.launch.py
robot_description_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_description'), 'launch', '_xarm_robot_description.launch.py'])),
launch_arguments={
'prefix': prefix,
'hw_ns': hw_ns,
'limited': limited,
'effort_control': effort_control,
'velocity_control': velocity_control,
'add_gripper': add_gripper,
'add_vacuum_gripper': add_vacuum_gripper,
'dof': dof,
'ros2_control_plugin': ros2_control_plugin,
'joint_states_remapping': 'joint_states',
'add_other_geometry': add_other_geometry,
'geometry_type': geometry_type,
'geometry_mass': geometry_mass,
'geometry_height': geometry_height,
'geometry_radius': geometry_radius,
'geometry_length': geometry_length,
'geometry_width': geometry_width,
'geometry_mesh_filename': geometry_mesh_filename,
'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
}.items(),
)
# xarm moveit common launch
# xarm_moveit_config/launch/_xarm_moveit_common.launch.py
xarm_moveit_common_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_moveit_config'), 'launch', '_xarm_moveit_common.launch.py'])),
launch_arguments={
'prefix': prefix,
'hw_ns': hw_ns,
'limited': limited,
'effort_control': effort_control,
'velocity_control': velocity_control,
'add_gripper': add_gripper,
'add_vacuum_gripper': add_vacuum_gripper,
'dof': dof,
'no_gui_ctrl': no_gui_ctrl,
'ros2_control_plugin': ros2_control_plugin,
'controllers_name': controllers_name,
'moveit_controller_manager_key': moveit_controller_manager_key,
'moveit_controller_manager_value': moveit_controller_manager_value,
'add_other_geometry': add_other_geometry,
'geometry_type': geometry_type,
'geometry_mass': geometry_mass,
'geometry_height': geometry_height,
'geometry_radius': geometry_radius,
'geometry_length': geometry_length,
'geometry_width': geometry_width,
'geometry_mesh_filename': geometry_mesh_filename,
'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
}.items(),
)
remappings = [
('follow_joint_trajectory', '{}{}_traj_controller/follow_joint_trajectory'.format(prefix.perform(context), xarm_type)),
]
controllers = ['{}{}_traj_controller'.format(prefix.perform(context), xarm_type)]
if add_gripper.perform(context) in ('True', 'true'):
remappings.append(
('follow_joint_trajectory', '{}xarm_gripper_traj_controller/follow_joint_trajectory'.format(prefix.perform(context)))
)
controllers.append('{}xarm_gripper_traj_controller'.format(prefix.perform(context)))
# joint state publisher node
joint_state_publisher_node = Node(
package='joint_state_publisher',
executable='joint_state_publisher',
name='joint_state_publisher',
output='screen',
parameters=[{'source_list': ['joint_states']}],
remappings=remappings,
)
# ros2 control launch
# xarm_controller/launch/_ros2_control.launch.py
ros2_control_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_controller'), 'launch', '_ros2_control.launch.py'])),
launch_arguments={
'prefix': prefix,
'hw_ns': hw_ns,
'limited': limited,
'effort_control': effort_control,
'velocity_control': velocity_control,
'add_gripper': add_gripper,
'add_vacuum_gripper': add_vacuum_gripper,
'dof': dof,
'ros2_control_plugin': ros2_control_plugin,
'add_other_geometry': add_other_geometry,
'geometry_type': geometry_type,
'geometry_mass': geometry_mass,
'geometry_height': geometry_height,
'geometry_radius': geometry_radius,
'geometry_length': geometry_length,
'geometry_width': geometry_width,
'geometry_mesh_filename': geometry_mesh_filename,
'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
}.items(),
)
# Load controllers
load_controllers = []
for controller in controllers:
load_controllers.append(Node(
package='controller_manager',
executable='spawner.py',
output='screen',
arguments=[
controller,
'--controller-manager', '{}/controller_manager'.format(ros_namespace)
],
))
return [
robot_description_launch,
xarm_moveit_common_launch,
joint_state_publisher_node,
ros2_control_launch,
] + load_controllers
def generate_launch_description():
return LaunchDescription([
OpaqueFunction(function=launch_setup)
])
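# Invocation sketch (package and file names here are hypothetical -- adjust to wherever
# this launch file actually lives, presumably under xarm_moveit_config/launch/):
#
#   ros2 launch xarm_moveit_config <this_file>.launch.py dof:=7 add_gripper:=true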
|
# -*- coding: utf-8 -*-
from collections import defaultdict
try:
import numpy as np
except ImportError:
np = None
import pytest
from ..util.testing import requires
from ..units import (
amount,
allclose,
concatenate,
concentration,
fold_constants,
energy,
get_derived_unit,
is_unitless,
linspace,
logspace_from_lin,
SI_base_registry,
unitless_in_registry,
format_string,
get_physical_dimensionality,
to_unitless,
length,
magnitude,
mass,
time,
default_unit_in_registry,
Backend,
latex_of_unit,
unit_of,
unit_registry_to_human_readable,
units_library,
volume,
simplified,
uniform,
unit_registry_from_human_readable,
_sum,
UncertainQuantity,
compare_equality,
default_units as u,
patched_numpy as pnp,
default_constants as dc,
)
def test_dimensionality():
assert mass + 2 * length - 2 * time == energy
assert amount - 3 * length == concentration
assert 3 * length == volume
@requires(units_library)
def test_default_units():
u.metre
u.second
u.hour
u.decimetre
u.mole
u.kilogram
u.ampere
u.kelvin
u.candela
u.molar
u.per100eV
u.joule
u.gray
u.eV
u.MeV
u.metre
u.decimetre
u.centimetre
u.micrometre
u.nanometre
u.gram
u.molar
u.hour
u.perMolar_perSecond
u.per100eV
u.umol
u.umol_per_J
@requires(units_library)
def test_allclose():
assert allclose(42, 42)
assert allclose(42 * u.meter, 0.042 * u.km)
assert not allclose(42, 43)
assert not allclose(42, 42 * u.meter)
assert not allclose(42, 43 * u.meter)
assert not allclose(42 * u.meter, 42)
a = np.linspace(2, 3) * u.second
b = np.linspace(2 / 3600.0, 3 / 3600.0) * u.hour
assert allclose(a, b)
assert allclose(
[3600 * u.second, 2 * u.metre / u.hour],
[1 * u.hour, 2 / 3600 * u.metre / u.second],
)
c1 = [[3000, 4000], [3000, 4000]] * u.mol / u.metre ** 3
c2 = [[3000, 4000], [436.2, 5281.89]] * u.mol / u.metre ** 3
assert not allclose(c1, c2)
assert allclose(0 * u.second, 0 * u.second)
# Possibly allow comparison with scalars in future (broadcasting):
# assert allclose(2, [2, 2])
# assert allclose([2, 2], 2)
# assert not allclose(2, [2, 3])
# assert not allclose([2, 3], 2)
# assert allclose(2*u.second, [2, 2]*u.second)
# assert allclose([2, 2]*u.second, 2*u.second)
# assert not allclose(2*u.second, [2, 3]*u.second)
# assert not allclose([2, 3]*u.second, 2*u.second)
@requires(units_library)
def test_is_unitless():
assert not is_unitless(1 * u.second)
assert is_unitless(1)
assert is_unitless({"a": 1, "b": 2.0})
assert not is_unitless({"a": 2, "b": 5.0 * u.second, "c": 3})
assert is_unitless(7 * u.molar / u.mole * u.dm3)
assert is_unitless([2, 3, 4])
assert not is_unitless([2 * u.m, 3 * u.m])
assert not is_unitless([3, 4 * u.m])
assert is_unitless(u.dimensionless) # this was causing RecursionError
@requires(units_library)
def test_unit_of():
assert compare_equality(unit_of(0.1 * u.metre / u.second), u.metre / u.second)
assert not compare_equality(
unit_of(0.1 * u.metre / u.second), u.kilometre / u.second
)
assert compare_equality(unit_of(7), 1)
assert unit_of(u.gray).dimensionality == u.gray.dimensionality
ref = (u.joule / u.kg).simplified.dimensionality
assert unit_of(u.gray, simplified=True).dimensionality == ref
assert compare_equality(unit_of(dict(foo=3 * u.molar, bar=2 * u.molar)), u.molar)
assert not compare_equality(
unit_of(dict(foo=3 * u.molar, bar=2 * u.molar)), u.second
)
with pytest.raises(Exception):
unit_of(dict(foo=3 * u.molar, bar=2 * u.second))
assert not compare_equality(
unit_of(dict(foo=3 * u.molar, bar=2 * u.molar)), u.mol / u.metre ** 3
)
@requires(units_library)
def test_to_unitless():
dm = u.decimetre
vals = [1.0 * dm, 2.0 * dm]
result = to_unitless(vals, u.metre)
assert result[0] == 0.1
assert result[1] == 0.2
with pytest.raises(ValueError):
to_unitless([42, 43], u.metre)
with pytest.raises(ValueError):
to_unitless(np.array([42, 43]), u.metre)
vals = [1.0, 2.0] * dm
result = to_unitless(vals, u.metre)
assert result[0] == 0.1
assert result[1] == 0.2
length_unit = 1000 * u.metre
result = to_unitless(1.0 * u.metre, length_unit)
assert abs(result - 1e-3) < 1e-12
amount_unit = 1e-9 # nano
assert abs(to_unitless(1.0, amount_unit) - 1e9) < 1e-6
assert (
abs(
to_unitless(3 / (u.second * u.molar), u.metre ** 3 / u.mole / u.second)
- 3e-3
)
< 1e-12
)
assert abs(to_unitless(2 * u.dm3, u.cm3) - 2000) < 1e-12
assert abs(to_unitless(2 * u.m3, u.dm3) - 2000) < 1e-12
assert (float(to_unitless(UncertainQuantity(2, u.dm3, 0.3), u.cm3)) - 2000) < 1e-12
g1 = UncertainQuantity(4.46, u.per100eV, 0)
g_unit = get_derived_unit(SI_base_registry, "radiolytic_yield")
assert abs(to_unitless(g1, g_unit) - 4.46 * 1.036e-7) < 1e-9
g2 = UncertainQuantity(-4.46, u.per100eV, 0)
assert abs(to_unitless(-g2, g_unit) - 4.46 * 1.036e-7) < 1e-9
vals = np.array([1.0 * dm, 2.0 * dm], dtype=object)
result = to_unitless(vals, u.metre)
assert result[0] == 0.1
assert result[1] == 0.2
one_billionth_molar_in_nanomolar = to_unitless(1e-9 * u.molar, u.nanomolar)
assert one_billionth_molar_in_nanomolar == 1
@requires(units_library)
def test_UncertainQuantity():
a = UncertainQuantity([1, 2], u.m, [0.1, 0.2])
assert a[1] == [2.0] * u.m
assert (-a)[0] == [-1.0] * u.m
assert (-a).uncertainty[0] == [0.1] * u.m
assert (-a)[0] == (a * -1)[0]
assert (-a).uncertainty[0] == (a * -1).uncertainty[0]
assert allclose(a, [1, 2] * u.m)
@requires(units_library, "sympy")
def test_to_unitless__sympy():
import sympy as sp
assert sp.cos(to_unitless(sp.pi)) == -1
with pytest.raises(AttributeError):
to_unitless(sp.pi, u.second)
@requires(units_library)
def test_linspace():
ls = linspace(2 * u.second, 3 * u.second)
assert abs(to_unitless(ls[0], u.hour) - 2 / 3600.0) < 1e-15
@requires(units_library)
def test_logspace_from_lin():
ls = logspace_from_lin(2 * u.second, 3 * u.second)
assert abs(to_unitless(ls[0], u.hour) - 2 / 3600.0) < 1e-15
assert abs(to_unitless(ls[-1], u.hour) - 3 / 3600.0) < 1e-15
@requires(units_library)
def test_get_derived_unit():
registry = SI_base_registry.copy()
registry["length"] = 1e-1 * registry["length"]
conc_unit = get_derived_unit(registry, "concentration")
dm = u.decimetre
assert abs(conc_unit - 1 * u.mole / (dm ** 3)) < 1e-12 * u.mole / (dm ** 3)
registry = defaultdict(lambda: 1)
registry["amount"] = 1e-9 # nano
assert (
abs(to_unitless(1.0, get_derived_unit(registry, "concentration")) - 1e9) < 1e-6
)
@requires(units_library)
def test_unit_registry_to_human_readable():
    # Not so much human-readable as JSON-serializable...
d = defaultdict(lambda: 1)
assert unit_registry_to_human_readable(d) == dict(
(x, (1, 1)) for x in SI_base_registry.keys()
)
ur = {
"length": 1e3 * u.metre,
"mass": 1e-2 * u.kilogram,
"time": 1e4 * u.second,
"current": 1e-1 * u.ampere,
"temperature": 1e1 * u.kelvin,
"luminous_intensity": 1e-3 * u.candela,
"amount": 1e4 * u.mole,
}
assert unit_registry_to_human_readable(ur) == {
"length": (1e3, "m"),
"mass": (1e-2, "kg"),
"time": (1e4, "s"),
"current": (1e-1, "A"),
"temperature": (1e1, "K"),
"luminous_intensity": (1e-3, "cd"),
"amount": (1e4, "mol"),
}
assert unit_registry_to_human_readable(ur) != {
"length": (1e2, "m"),
"mass": (1e-2, "kg"),
"time": (1e4, "s"),
"current": (1e-1, "A"),
"temperature": (1e1, "K"),
"luminous_intensity": (1e-3, "cd"),
"amount": (1e4, "mol"),
}
@requires(units_library)
def test_unit_registry_from_human_readable():
hr = unit_registry_to_human_readable(defaultdict(lambda: 1))
assert hr == dict((x, (1, 1)) for x in SI_base_registry.keys())
ur = unit_registry_from_human_readable(hr)
assert ur == dict((x, 1) for x in SI_base_registry.keys())
hr = unit_registry_to_human_readable(SI_base_registry)
assert hr == {
"length": (1.0, "m"),
"mass": (1.0, "kg"),
"time": (1.0, "s"),
"current": (1.0, "A"),
"temperature": (1.0, "K"),
"luminous_intensity": (1.0, "cd"),
"amount": (1.0, "mol"),
}
ur = unit_registry_from_human_readable(hr)
assert ur == SI_base_registry
ur = unit_registry_from_human_readable(
{
"length": (1.0, "m"),
"mass": (1.0, "kg"),
"time": (1.0, "s"),
"current": (1.0, "A"),
"temperature": (1.0, "K"),
"luminous_intensity": (1.0, "cd"),
"amount": (1.0, "mol"),
}
)
assert ur == {
"length": u.metre,
"mass": u.kilogram,
"time": u.second,
"current": u.ampere,
"temperature": u.kelvin,
"luminous_intensity": u.candela,
"amount": u.mole,
}
ur = unit_registry_from_human_readable(
{
"length": (1e3, "m"),
"mass": (1e-2, "kg"),
"time": (1e4, "s"),
"current": (1e-1, "A"),
"temperature": (1e1, "K"),
"luminous_intensity": (1e-3, "cd"),
"amount": (1e4, "mol"),
}
)
assert ur == {
"length": 1e3 * u.metre,
"mass": 1e-2 * u.kilogram,
"time": 1e4 * u.second,
"current": 1e-1 * u.ampere,
"temperature": 1e1 * u.kelvin,
"luminous_intensity": 1e-3 * u.candela,
"amount": 1e4 * u.mole,
}
assert ur != {
"length": 1e2 * u.metre,
"mass": 1e-3 * u.kilogram,
"time": 1e2 * u.second,
"current": 1e-2 * u.ampere,
"temperature": 1e0 * u.kelvin,
"luminous_intensity": 1e-2 * u.candela,
"amount": 1e3 * u.mole,
}
@requires(units_library)
def test_unitless_in_registry():
mag = magnitude(unitless_in_registry(3 * u.per100eV, SI_base_registry))
ref = 3 * 1.0364268834527753e-07
assert abs(mag - ref) < 1e-14
ul = unitless_in_registry([3 * u.per100eV, 5 * u.mol / u.J], SI_base_registry)
assert allclose(ul, [ref, 5], rtol=1e-6)
@requires(units_library)
def test_compare_equality():
assert compare_equality(3 * u.m, 3 * u.m)
assert compare_equality(3 * u.m, 3e-3 * u.km)
assert compare_equality(3e3 * u.mm, 3 * u.m)
assert not compare_equality(3 * u.m, 2 * u.m)
assert not compare_equality(3 * u.m, 3 * u.s)
assert not compare_equality(3 * u.m, 3 * u.m ** 2)
assert not compare_equality(3 * u.m, np.array(3))
assert not compare_equality(np.array(3), 3 * u.m)
assert compare_equality([3, None], [3, None])
assert not compare_equality([3, None, 3], [3, None, None])
assert not compare_equality([None, None, 3], [None, None, 2])
assert compare_equality([3 * u.m, None], [3, None])
assert not compare_equality([3 * u.m, None], [3 * u.km, None])
@requires(units_library)
def test_get_physical_dimensionality():
assert get_physical_dimensionality(3 * u.mole) == {"amount": 1}
assert get_physical_dimensionality([3 * u.mole]) == {"amount": 1}
assert get_physical_dimensionality(42) == {}
@requires(units_library)
def test_default_unit_in_registry():
mol_per_m3 = default_unit_in_registry(3 * u.molar, SI_base_registry)
assert magnitude(mol_per_m3) == 1
assert mol_per_m3 == u.mole / u.metre ** 3
assert default_unit_in_registry(3, SI_base_registry) == 1
assert default_unit_in_registry(3.0, SI_base_registry) == 1
@requires(units_library)
def test__sum():
# sum() does not work here...
assert (_sum([0.1 * u.metre, 1 * u.decimetre]) - 2 * u.decimetre) / u.metre == 0
@requires(units_library)
def test_Backend():
b = Backend()
with pytest.raises(ValueError):
b.exp(-3 * u.metre)
assert abs(b.exp(1234 * u.metre / u.kilometre) - b.exp(1.234)) < 1e-14
@requires(units_library, "numpy")
def test_Backend__numpy():
import numpy as np
b = Backend(np)
b.sum([1000 * u.metre / u.kilometre, 1], axis=0) == 2.0
with pytest.raises(AttributeError):
b.Piecewise
@requires("sympy")
def test_Backend__sympy():
b = Backend("sympy")
b.sin(b.pi) == 0
with pytest.raises(AttributeError):
b.min
@requires(units_library)
def test_format_string():
assert format_string(3 * u.gram / u.metre ** 2) == ("3", "g/m**2")
assert format_string(3 * u.gram / u.metre ** 2, tex=True) == (
"3",
r"\mathrm{\frac{g}{m^{2}}}",
)
@requires(units_library)
def test_joule_html():
joule_htm = "kg⋅m<sup>2</sup>/s<sup>2</sup>"
joule = u.J.dimensionality.simplified
assert joule.html == joule_htm
@requires(units_library)
def test_latex_of_unit():
assert latex_of_unit(u.gram / u.metre ** 2) == r"\mathrm{\frac{g}{m^{2}}}"
@requires(units_library)
def test_concatenate():
a = [1, 2] * u.metre
b = [2, 3] * u.mm
ref = [1, 2, 2e-3, 3e-3] * u.metre
assert allclose(concatenate((a, b)), ref)
@requires(units_library)
def test_pow0():
a = [1, 2] * u.metre
b = a ** 0
assert allclose(b, [1, 1])
c = a ** 2
assert allclose(c, [1, 4] * u.m ** 2)
@requires(units_library)
def test_patched_numpy():
# see https://github.com/python-quantities/python-quantities/issues/152
assert allclose(pnp.exp(3 * u.joule / (2 * u.cal)), 1.43119335, rtol=1e-5)
for arg in ([1, 2], [[1], [2]], [1], 2):
assert np.all(pnp.exp(arg) == np.exp(arg))
@requires(units_library)
def test_tile():
a = [2 * u.m, 3 * u.km]
assert allclose(pnp.tile(a, 2), [2 * u.m, 3000 * u.m, 2e-3 * u.km, 3 * u.km])
@requires(units_library)
def test_simplified():
assert allclose(
simplified(dc.molar_gas_constant), 8.314 * u.J / u.mol / u.K, rtol=2e-3
)
assert simplified(2.0) == 2.0
@requires(units_library)
def test_polyfit_polyval():
p1 = pnp.polyfit([0, 1, 2], [0, 1, 4], 2)
assert allclose(p1, [1, 0, 0], atol=1e-14)
assert allclose(pnp.polyval(p1, 3), 9)
assert allclose(pnp.polyval(p1, [4, 5]), [16, 25])
p2 = pnp.polyfit([0, 1, 2] * u.s, [0, 1, 4] * u.m, 2)
for _p, _r, _a in zip(
p2,
[1 * u.m / u.s ** 2, 0 * u.m / u.s, 0 * u.m],
[0 * u.m / u.s ** 2, 1e-15 * u.m / u.s, 1e-15 * u.m],
):
assert allclose(_p, _r, atol=_a)
assert allclose(pnp.polyval(p2, 3 * u.s), 9 * u.m)
assert allclose(pnp.polyval(p2, [4, 5] * u.s), [16, 25] * u.m)
@requires(units_library)
def test_uniform():
base = [3 * u.km, 200 * u.m]
refs = [np.array([3000, 200]), np.array([3, 0.2])]
def _check(case, ref):
assert np.any(np.all(magnitude(uniform(case)) == ref, axis=1))
_check(base, refs)
_check(tuple(base), refs)
keys = "foo bar".split()
assert magnitude(uniform(dict(zip(keys, base)))) in [
dict(zip(keys, r)) for r in refs
]
@requires(units_library)
def test_fold_constants():
assert abs(fold_constants(dc.pi) - np.pi) < 1e-15
@requires("numpy")
def test_to_unitless___0D_array_with_object():
from ..util._expr import Constant
# b = Backend('sympy')
# pi = np.array(b.pi)
pi = np.array(Constant(np.pi))
one_thousand = to_unitless(pi * u.metre, u.millimeter)
assert get_physical_dimensionality(one_thousand) == {}
assert abs(magnitude(one_thousand) - np.arctan(1) * 4e3) < 1e-12
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import tempfile
import os
from .. import logger as pyyaks_logger
import six
def test_suppress_newline():
stdout = six.StringIO()
logger = pyyaks_logger.get_logger(level=pyyaks_logger.INFO, stream=stdout)
for handler in logger.handlers:
handler.suppress_newline = True
logger.info('Info')
logger.warning('Warning')
for handler in logger.handlers:
handler.suppress_newline = False
logger.info('Info')
logger.warning('Warning')
assert stdout.getvalue() == "InfoWarningInfo\nWarning\n"
def test_suppress_newline_cm():
stdout = six.StringIO()
logger = pyyaks_logger.get_logger(level=pyyaks_logger.INFO, stream=stdout)
with pyyaks_logger.newlines_suppressed(logger):
logger.info('Info')
logger.warning('Warning')
logger.info('Info')
logger.warning('Warning')
assert stdout.getvalue() == "InfoWarningInfo\nWarning\n"
def test_stream():
stdout = six.StringIO()
logger = pyyaks_logger.get_logger(level=pyyaks_logger.INFO, stream=stdout)
logger.debug('Debug')
logger.info('Info')
logger.warning('Warning')
assert stdout.getvalue() == "Info\nWarning\n"
def test_file(tmpdir):
tmp = os.path.join(tmpdir, 'tmp.log')
logger = pyyaks_logger.get_logger(filename=tmp, stream=None)
logger.debug('Debug')
logger.info('Info')
logger.warning('Warning')
assert open(tmp).read() == "Info\nWarning\n"
def test_redefine(tmpdir):
stdout1 = six.StringIO()
stdout2 = six.StringIO()
tmp1 = os.path.join(tmpdir, 'tmp1.log')
tmp2 = os.path.join(tmpdir, 'tmp2.log')
logger = pyyaks_logger.get_logger(filename=tmp1, stream=stdout1, filelevel=pyyaks_logger.WARNING)
logger.debug('Debug1')
logger.info('Info1')
logger.warning('Warning1')
logger = pyyaks_logger.get_logger(filename=tmp2, stream=stdout2, level=pyyaks_logger.DEBUG)
logger.debug('Debug2')
logger.info('Info2')
logger.warning('Warning2')
assert open(tmp1).read() == "Warning1\n"
assert stdout1.getvalue() == "Info1\nWarning1\n"
assert open(tmp2).read() == "Debug2\nInfo2\nWarning2\n"
assert stdout2.getvalue() == "Debug2\nInfo2\nWarning2\n"
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ApiTypeError, ApiValueError # noqa: F401
class ProjectsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
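    # Minimal client-setup sketch (host and token are placeholders; the Configuration-based
    # wiring is an assumption about the generated package, not shown in this file):
    #
    #   import polyaxon_sdk
    #   configuration = polyaxon_sdk.Configuration(host="https://cloud.polyaxon.com")
    #   configuration.api_key["ApiKey"] = "<token>"
    #   api = ProjectsV1Api(polyaxon_sdk.ApiClient(configuration))
    #   api.archive_project("acme", "my-project")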
def archive_project(self, owner, project, **kwargs): # noqa: E501
"""Archive project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.archive_project_with_http_info(
owner, project, **kwargs
) # noqa: E501
def archive_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Archive project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: return the response data only, without
            the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method archive_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `archive_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `archive_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/archive",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def bookmark_project(self, owner, project, **kwargs): # noqa: E501
"""Bookmark project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bookmark_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.bookmark_project_with_http_info(
owner, project, **kwargs
) # noqa: E501
def bookmark_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Bookmark project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bookmark_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: return the response data only, without
            the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method bookmark_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `bookmark_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `bookmark_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/bookmark",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def create_project(self, owner, body, **kwargs): # noqa: E501
"""Create new project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Project body: Project body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_project_with_http_info(owner, body, **kwargs) # noqa: E501
def create_project_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Create new project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Project body: Project body (required)
        :param _return_http_data_only: return the response data only, without
            the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `create_project`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `create_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/projects/create",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Project", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_project(self, owner, project, **kwargs): # noqa: E501
"""Delete project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.delete_project_with_http_info(
owner, project, **kwargs
) # noqa: E501
def delete_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Delete project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: return the response data only, without
            the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `delete_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `delete_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def disable_project_ci(self, owner, project, **kwargs): # noqa: E501
"""Disbale project CI # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disable_project_ci(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.disable_project_ci_with_http_info(
owner, project, **kwargs
) # noqa: E501
def disable_project_ci_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Disbale project CI # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disable_project_ci_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: return the response data only, without
            the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method disable_project_ci" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `disable_project_ci`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `disable_project_ci`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/ci",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def enable_project_ci(self, owner, project, **kwargs): # noqa: E501
"""Enable project CI # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_project_ci(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.enable_project_ci_with_http_info(
owner, project, **kwargs
) # noqa: E501
def enable_project_ci_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Enable project CI # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_project_ci_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method enable_project_ci" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `enable_project_ci`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `enable_project_ci`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/ci",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
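    # Illustrative usage sketch (not part of the generated client; ``api``, owner and
    # project names below are placeholder assumptions for an instance of this class built
    # on a configured api_client with ApiKey auth). CI for a project is toggled via the
    # two endpoints above:
    #
    #     api.enable_project_ci("acme", "churn-model")    # POST   /api/v1/{owner}/{project}/ci
    #     api.disable_project_ci("acme", "churn-model")   # DELETE /api/v1/{owner}/{project}/ci
    #
    # Both calls return a plain ``object``; pass async_req=True to receive the request
    # thread and call ``.get()`` on it instead.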
def fetch_project_teams(self, owner, project, **kwargs): # noqa: E501
"""Get project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fetch_project_teams(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectTeams
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.fetch_project_teams_with_http_info(
owner, project, **kwargs
) # noqa: E501
def fetch_project_teams_with_http_info(
self, owner, project, **kwargs
): # noqa: E501
"""Get project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.fetch_project_teams_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectTeams, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method fetch_project_teams" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `fetch_project_teams`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `fetch_project_teams`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/teams",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectTeams", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_project(self, owner, project, **kwargs): # noqa: E501
"""Get project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_project_with_http_info(owner, project, **kwargs) # noqa: E501
def get_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Get project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `get_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `get_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Project", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
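    # Illustrative usage sketch (assumptions: ``api`` is an instance of this class as
    # above; owner/project values are placeholders). ``get_project`` issues
    # GET /api/v1/{owner}/{project} and deserializes the response into a ``V1Project``:
    #
    #     project = api.get_project("acme", "churn-model")
    #     thread = api.get_project("acme", "churn-model", async_req=True)
    #     project = thread.get()  # resolve the asynchronous result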
def get_project_settings(self, owner, project, **kwargs): # noqa: E501
"""Get Project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_settings(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_project_settings_with_http_info(
owner, project, **kwargs
) # noqa: E501
def get_project_settings_with_http_info(
self, owner, project, **kwargs
): # noqa: E501
"""Get Project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_settings_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectSettings, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project_settings" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `get_project_settings`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `get_project_settings`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/settings",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectSettings", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
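    # Illustrative sketch of the *_with_http_info variants (same ``api`` assumption as
    # above). They return a (data, status_code, headers) tuple rather than only the
    # deserialized body, which is useful when the caller needs the HTTP status or headers:
    #
    #     settings, status, headers = api.get_project_settings_with_http_info("acme", "churn-model")
    #     assert status == 200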
def list_archived_projects(self, user, **kwargs): # noqa: E501
"""List archived projects for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_archived_projects(user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user: User (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListProjectsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_archived_projects_with_http_info(user, **kwargs) # noqa: E501
def list_archived_projects_with_http_info(self, user, **kwargs): # noqa: E501
"""List archived projects for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_archived_projects_with_http_info(user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user: User (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListProjectsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["user", "offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_archived_projects" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'user' is set
if self.api_client.client_side_validation and (
"user" not in local_var_params
or local_var_params["user"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `user` when calling `list_archived_projects`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "user" in local_var_params:
path_params["user"] = local_var_params["user"] # noqa: E501
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/archives/{user}/projects",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListProjectsResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
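    # Illustrative usage sketch for the list endpoints (``api`` and the values below are
    # placeholder assumptions). ``offset``/``limit``/``sort``/``query`` are forwarded as
    # query parameters on GET /api/v1/archives/{user}/projects and the response is
    # deserialized into a ``V1ListProjectsResponse``:
    #
    #     resp = api.list_archived_projects("jane", offset=0, limit=20, sort="-created_at")
    #     for project in resp.results:  # ``results`` is assumed from V1ListProjectsResponse
    #         print(project.name)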
def list_bookmarked_projects(self, user, **kwargs): # noqa: E501
"""List bookmarked projects for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_bookmarked_projects(user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user: User (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListProjectsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_bookmarked_projects_with_http_info(
user, **kwargs
) # noqa: E501
def list_bookmarked_projects_with_http_info(self, user, **kwargs): # noqa: E501
"""List bookmarked projects for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_bookmarked_projects_with_http_info(user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user: User (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListProjectsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["user", "offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_bookmarked_projects" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'user' is set
if self.api_client.client_side_validation and (
"user" not in local_var_params
or local_var_params["user"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `user` when calling `list_bookmarked_projects`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "user" in local_var_params:
path_params["user"] = local_var_params["user"] # noqa: E501
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/bookmarks/{user}/projects",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListProjectsResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def list_project_names(self, owner, **kwargs): # noqa: E501
"""List project names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_project_names(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListProjectsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_project_names_with_http_info(owner, **kwargs) # noqa: E501
def list_project_names_with_http_info(self, owner, **kwargs): # noqa: E501
"""List project names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_project_names_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListProjectsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_project_names" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `list_project_names`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/projects/names",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListProjectsResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def list_projects(self, owner, **kwargs): # noqa: E501
"""List projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListProjectsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_projects_with_http_info(owner, **kwargs) # noqa: E501
def list_projects_with_http_info(self, owner, **kwargs): # noqa: E501
"""List projects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_projects_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter for the search.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListProjectsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_projects" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `list_projects`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/projects/list",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListProjectsResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_project(self, owner, project_name, body, **kwargs): # noqa: E501
"""Patch project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project(owner, project_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project_name: Project name (required)
:param V1Project body: Project body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_project_with_http_info(
owner, project_name, body, **kwargs
) # noqa: E501
def patch_project_with_http_info(
self, owner, project_name, body, **kwargs
): # noqa: E501
"""Patch project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project_with_http_info(owner, project_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project_name: Project name (required)
:param V1Project body: Project body (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project_name", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `patch_project`"
) # noqa: E501
# verify the required parameter 'project_name' is set
if self.api_client.client_side_validation and (
"project_name" not in local_var_params
or local_var_params["project_name"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project_name` when calling `patch_project`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project_name" in local_var_params:
path_params["project.name"] = local_var_params["project_name"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project.name}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Project", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
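    # Illustrative usage sketch (assumptions: ``api`` as above; ``V1Project`` is the
    # generated model from this SDK and accepts its attributes as keyword arguments).
    # ``patch_project`` sends the body as JSON to PATCH /api/v1/{owner}/{project.name}
    # and returns the updated ``V1Project``:
    #
    #     body = V1Project(description="Churn prediction experiments")
    #     updated = api.patch_project("acme", "churn-model", body)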
def patch_project_settings(self, owner, project, body, **kwargs): # noqa: E501
"""Patch project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project_settings(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectSettings body: Project settings body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_project_settings_with_http_info(
owner, project, body, **kwargs
) # noqa: E501
def patch_project_settings_with_http_info(
self, owner, project, body, **kwargs
): # noqa: E501
"""Patch project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project_settings_with_http_info(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectSettings body: Project settings body (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectSettings, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_project_settings" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `patch_project_settings`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `patch_project_settings`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_project_settings`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/settings",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectSettings", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_project_teams(self, owner, project, body, **kwargs): # noqa: E501
"""Patch project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project_teams(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
        :param V1ProjectTeams body: Project teams body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectTeams
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_project_teams_with_http_info(
owner, project, body, **kwargs
) # noqa: E501
def patch_project_teams_with_http_info(
self, owner, project, body, **kwargs
): # noqa: E501
"""Patch project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_project_teams_with_http_info(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
        :param V1ProjectTeams body: Project teams body (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectTeams, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_project_teams" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `patch_project_teams`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `patch_project_teams`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_project_teams`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/teams",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectTeams", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def restore_project(self, owner, project, **kwargs): # noqa: E501
"""Restore project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.restore_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.restore_project_with_http_info(
owner, project, **kwargs
) # noqa: E501
def restore_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Restore project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.restore_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method restore_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `restore_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `restore_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/restore",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
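    # Illustrative usage sketch (``api`` assumption as above). ``restore_project`` issues
    # POST /api/v1/{owner}/{project}/restore to bring an archived project back; archived
    # projects can be discovered beforehand with ``list_archived_projects``:
    #
    #     api.restore_project("acme", "churn-model")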
def unbookmark_project(self, owner, project, **kwargs): # noqa: E501
"""Unbookmark project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unbookmark_project(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.unbookmark_project_with_http_info(
owner, project, **kwargs
) # noqa: E501
def unbookmark_project_with_http_info(self, owner, project, **kwargs): # noqa: E501
"""Unbookmark project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unbookmark_project_with_http_info(owner, project, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project: Project under namespace (required)
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method unbookmark_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `unbookmark_project`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `unbookmark_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/unbookmark",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_project(self, owner, project_name, body, **kwargs): # noqa: E501
"""Update project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project(owner, project_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project_name: Project name (required)
:param V1Project body: Project body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_project_with_http_info(
owner, project_name, body, **kwargs
) # noqa: E501
def update_project_with_http_info(
self, owner, project_name, body, **kwargs
): # noqa: E501
"""Update project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_with_http_info(owner, project_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str project_name: Project name (required)
:param V1Project body: Project body (required)
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project_name", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_project" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `update_project`"
) # noqa: E501
# verify the required parameter 'project_name' is set
if self.api_client.client_side_validation and (
"project_name" not in local_var_params
or local_var_params["project_name"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project_name` when calling `update_project`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_project`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project_name" in local_var_params:
path_params["project.name"] = local_var_params["project_name"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project.name}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Project", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_project_settings(self, owner, project, body, **kwargs): # noqa: E501
"""Update project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_settings(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectSettings body: Project settings body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_project_settings_with_http_info(
owner, project, body, **kwargs
) # noqa: E501
def update_project_settings_with_http_info(
self, owner, project, body, **kwargs
): # noqa: E501
"""Update project settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_settings_with_http_info(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectSettings body: Project settings body (required)
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectSettings, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_project_settings" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `update_project_settings`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `update_project_settings`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_project_settings`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/settings",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectSettings", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_project_teams(self, owner, project, body, **kwargs): # noqa: E501
"""Update project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_teams(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectTeams body: Project settings body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ProjectTeams
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_project_teams_with_http_info(
owner, project, body, **kwargs
) # noqa: E501
def update_project_teams_with_http_info(
self, owner, project, body, **kwargs
): # noqa: E501
"""Update project teams # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_project_teams_with_http_info(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project name (required)
:param V1ProjectTeams body: Project settings body (required)
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ProjectTeams, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_project_teams" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `update_project_teams`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `update_project_teams`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_project_teams`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/teams",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ProjectTeams", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def upload_project_artifact(
self, owner, project, uuid, uploadfile, **kwargs
): # noqa: E501
"""Upload artifact to a store via project access # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_project_artifact(owner, project, uuid, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project having access to the store (required)
        :param str uuid: Unique identifier of the entity (required)
:param file uploadfile: The file to upload. (required)
        :param str path: Path of the file to upload to.
        :param bool overwrite: Whether to overwrite an existing file at that path.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.upload_project_artifact_with_http_info(
owner, project, uuid, uploadfile, **kwargs
) # noqa: E501
def upload_project_artifact_with_http_info(
self, owner, project, uuid, uploadfile, **kwargs
): # noqa: E501
"""Upload artifact to a store via project access # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_project_artifact_with_http_info(owner, project, uuid, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project having access to the store (required)
        :param str uuid: Unique identifier of the entity (required)
:param file uploadfile: The file to upload. (required)
        :param str path: Path of the file to upload to.
        :param bool overwrite: Whether to overwrite an existing file at that path.
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "project", "uuid", "uploadfile", "path", "overwrite"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_project_artifact" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `upload_project_artifact`"
) # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and (
"project" not in local_var_params
or local_var_params["project"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `project` when calling `upload_project_artifact`"
) # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uuid` when calling `upload_project_artifact`"
) # noqa: E501
# verify the required parameter 'uploadfile' is set
if self.api_client.client_side_validation and (
"uploadfile" not in local_var_params
or local_var_params["uploadfile"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uploadfile` when calling `upload_project_artifact`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "project" in local_var_params:
path_params["project"] = local_var_params["project"] # noqa: E501
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"] # noqa: E501
query_params = []
if (
"path" in local_var_params and local_var_params["path"] is not None
): # noqa: E501
query_params.append(("path", local_var_params["path"])) # noqa: E501
if (
"overwrite" in local_var_params
and local_var_params["overwrite"] is not None
): # noqa: E501
query_params.append(
("overwrite", local_var_params["overwrite"])
) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if "uploadfile" in local_var_params:
local_var_files["uploadfile"] = local_var_params["uploadfile"] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["multipart/form-data"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/{owner}/{project}/artifacts/{uuid}/upload",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
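# Usage sketch for the generated client methods above. The package and API class
# names below (polyaxon_sdk.Configuration / ApiClient / ProjectsV1Api) follow the
# usual openapi-generator layout but are not shown in this excerpt, so treat them
# as assumptions rather than confirmed API.
#
#   import polyaxon_sdk
#
#   configuration = polyaxon_sdk.Configuration(host="https://cloud.example.com")
#   configuration.api_key["ApiKey"] = "my-token"  # matches auth_settings = ["ApiKey"]
#   api = polyaxon_sdk.ProjectsV1Api(polyaxon_sdk.ApiClient(configuration))
#
#   # Synchronous call; returns the deserialized response_type ("object" here):
#   api.restore_project("owner", "project")
#
#   # Asynchronous call: async_req=True returns a thread-like handle; .get() blocks.
#   thread = api.unbookmark_project("owner", "project", async_req=True)
#   result = thread.get()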
|
import os, sys
config = dict()
# Supported module(s)
# <module>: {'status': True, 'help': '<help message>', 'description': '<module description>'}
config['modules'] = {
'default': {'status': True, 'help': 'Prints Hello World', 'description': 'Prints Hello World', 'args_chk': False},
'albus': {'status': True, 'help': 'Mongo Db/Data operations', 'description': 'Mongo Db/Data operations', 'args_chk': True}
}
# Mail Configuration
# <prod_type> : {'server': '<server name>', 'from': '<from_email_address>'}
config['mail'] = {
'production': {
'server': 'localhost',
'from': 'devOps_admin@domain.com'
},
'test': {
'server': 'localhost',
'from': 'devOps_admin@domain.com',
'to': 'akshayag@domain.com'
}
}
# Supported OS(s) and Distribution(s)
config['os'] = {
'supported': {'linux': True, 'windows': True},
'distribution': {
'linux': ['3.10.0-693.el7.x86_64'],
'windows': ['8']
}
}
# Environment variables to be set
config['env'] = None
config['DEBUG'] = False
# Set framework defaults; some of these can be overridden via command-line options.
config['APP_ROOT'] = os.path.abspath(os.path.dirname(sys.argv[0]))
config['LOG_ROOT'] = os.path.abspath(os.path.dirname(sys.argv[0]))
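# Consumption sketch (illustrative; not part of the original module): a driver
# script might import this dict and dispatch on the module registry defined
# above. The import path `config` and the dispatch logic are assumptions.
#
#   from config import config
#
#   def run_module(name, args):
#       entry = config['modules'].get(name)
#       if entry is None or not entry['status']:
#           raise SystemExit('unsupported module: %s' % name)
#       if entry['args_chk'] and not args:
#           raise SystemExit('%s needs arguments. help: %s' % (name, entry['help']))
#       print('running %s: %s' % (name, entry['description']))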
|
import time
import pytest
from helpers.cluster import ClickHouseCluster
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
def _fill_nodes(nodes, shard, connections_count):
for node in nodes:
node.query(
'''
CREATE DATABASE test;
CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
PARTITION BY date
ORDER BY id
SETTINGS
replicated_max_parallel_fetches_for_host={connections},
index_granularity=8192;
'''.format(shard=shard, replica=node.name, connections=connections_count))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', user_configs=[], main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', user_configs=[], main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_small_cluster():
try:
cluster.start()
_fill_nodes([node1, node2], 1, 1)
yield cluster
finally:
cluster.shutdown()
def test_single_endpoint_connections_count(start_small_cluster):
def task(count):
print("Inserting ten times from {}".format(count))
        for i in range(count, count + 10):
node1.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))
p = Pool(10)
    p.map(task, range(0, 100, 10))
assert_eq_with_retry(node1, "select count() from test_table", "100")
assert_eq_with_retry(node2, "select count() from test_table", "100")
assert node2.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '1\n'
def test_keepalive_timeout(start_small_cluster):
current_count = int(node1.query("select count() from test_table").strip())
node1.query("insert into test_table values ('2017-06-16', 777, 0)")
assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 1))
# Server keepAliveTimeout is 3 seconds, default client session timeout is 8
    # let's sleep in that interval
time.sleep(4)
node1.query("insert into test_table values ('2017-06-16', 888, 0)")
time.sleep(3)
assert_eq_with_retry(node2, "select count() from test_table", str(current_count + 2))
assert not node2.contains_in_log("No message received"), "Found 'No message received' in clickhouse-server.log"
node3 = cluster.add_instance('node3', user_configs=[], main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', user_configs=[], main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node5 = cluster.add_instance('node5', user_configs=[], main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_big_cluster():
try:
cluster.start()
_fill_nodes([node3, node4, node5], 2, 2)
yield cluster
finally:
cluster.shutdown()
def test_multiple_endpoint_connections_count(start_big_cluster):
def task(count):
print("Inserting ten times from {}".format(count))
        if (count // 10) % 2 == 1:
node = node3
else:
node = node4
        for i in range(count, count + 10):
node.query("insert into test_table values ('2017-06-16', {}, 0)".format(i))
p = Pool(10)
    p.map(task, range(0, 100, 10))
assert_eq_with_retry(node3, "select count() from test_table", "100")
assert_eq_with_retry(node4, "select count() from test_table", "100")
assert_eq_with_retry(node5, "select count() from test_table", "100")
    # two per host
assert node5.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '4\n'
|
from django.core.management.base import BaseCommand
from core import fixtures
class Command(BaseCommand):
help = "Create demo site data"
def handle(self, *args, **options):
self.stdout.write(self.style.SUCCESS("creating perm group"))
fixtures.create_perm_group()
self.stdout.write(self.style.SUCCESS("creating users and products"))
users, products = fixtures.create_users_and_products(
num_users=50, prob_user_has_product=0.5, prob_product_onsale=0.9
)
self.stdout.write(self.style.SUCCESS("creating orders"))
fixtures.create_orders(
users, products, prob_user_has_order=0.75, prob_order_has_extra_items=0.5
)
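# Usage sketch: Django derives the management command name from this file's
# name under core/management/commands/, which is not shown in this excerpt,
# so the command name below is hypothetical:
#
#   python manage.py create_demo_data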
|
"""
Class for a more OOP interface to several potentials.
"""
# Import NumPy, which provides pi, sqrt, array, transpose, etc., as np
import numpy as np
from scipy.interpolate import CubicSpline
class Potential(object):
def __init__(self, file=None):
"""Constructor method, added vectorized version of all the methods.
Args:
            file (string, optional): The caller may provide a file to interpolate a
                potential onto the DVR grid. The file must contain two tab-separated
                columns. Defaults to None.
Attributes:
            vectorized_V_morse_1, vectorized_V_morse_2, vectorized_V_morse_centrifugal
                (numpy.vectorize): Vectorized versions of the Morse functions
            vectorized_V_Bernstein (numpy.vectorize): Vectorized version of the Bernstein function
            vectorized_V_c_state (numpy.vectorize): Vectorized version of the c-state function
                interpolated from the cStateDCalc.csv file, which is NOT provided in the
                released package
            vectorized_V_Interpolated (numpy.vectorize): Vectorized version of the
                interpolated function
            vectorized_V_Coulomb (numpy.vectorize): Vectorized version of the Coulomb
                function
Todo:
Add a general interpolation scheme so any file passed into this
class's constructor will work. Right now, only works for a
particular file that was tested in house and not released with
package.
"""
self.vectorized_V_morse_centrifugal = np.vectorize(
self.V_morse_centrifugal
)
self.vectorized_V_morse_1 = np.vectorize(self.V_morse_1)
self.vectorized_V_morse_2 = np.vectorize(self.V_morse_2)
self.vectorized_V_Bernstein = np.vectorize(self.V_Bernstein)
self.vectorized_V_c_state = np.vectorize(self.V_c_state)
self.vectorized_V_Interpolated = np.vectorize(self.V_Interpolated)
self.vectorized_V_Coulomb = np.vectorize(self.V_Coulomb)
if file is None:
print(
"\nPotential constructed without a file. Can only use analytic potential functions defined in the quantumgrid.potential class"
)
else:
file_name = open(file, "r")
data = np.loadtxt(file_name)
pot_len_state = data.shape[0]
pot_columns = data.shape[1]
print(
"Finished reading V file with ",
pot_len_state,
" rows and ",
pot_columns,
" columns",
)
self.r_data = np.empty(pot_len_state)
self.V_data = np.empty(pot_len_state)
for i in range(0, pot_len_state):
self.r_data[i] = data[i, 0]
self.V_data[i] = data[i, 1]
self.V_vals = CubicSpline(self.r_data, self.V_data)
    def V_morse_1(self, r: complex, time: float = 0.0) -> complex:
"""
Morse Potential defined by
.. math::
V = d*(y^2 - 2*y)
with :math:`y` defined by
.. math::
y = e^{(-a*(r-re))}
This potential also defines parameters specifically for :math:`H_2`, De = 4.75 eV
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
a = 1.0277
re = 1.4022
d = 0.1746
# potential
y = np.exp(-a * (r - re))
term = y ** 2 - 2.0 * y
pot = d * term
return pot
    def V_morse_2(self, r: complex, time: float = 0.0) -> complex:
"""
Morse Potential defined by
.. math::
V = d*(y^2 - 2*y)
with :math:`y` defined by
.. math::
y = e^{(-a*(r-re))}
This potential also defines parameters specifically for :math:`H_2`
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
a = 1.0277
re = 2.0
d = 1.2 * 0.1746
Eshift = 0.15
# potential
y = np.exp(-a * (r - re))
term = y ** 2 - 2.0 * y
pot = d * term + Eshift
return pot
    def V_morse_centrifugal(self, r: complex, time: float = 0.0) -> complex:
"""
Morse Potential defined by
.. math::
V = d*(y^2 - 2*y) + \\mathrm{Centrifugal potential}
with :math:`y` defined by
.. math::
y = e^{(-a*(r-re))}
This potential also defines parameters specifically for :math:`H_2`
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
d = 0.1746
a = 1.0277
re = 1.4022
H_Mass = 1.0078
Daltons_to_eMass = 1822.89
mu = (H_Mass / 2.0) * Daltons_to_eMass
y = np.exp(-a * (r - re))
        # j value for the centrifugal potential; mu is defined just above
j = 0 # Morse potential has rotational predissociation resonances for some j
        pot = d * (y ** 2 - 2.0 * y) + float(j * (j + 1)) / (
2.0 * mu * r ** 2
)
return pot
    def V_Bernstein(self, r: complex, time: float = 0.0) -> complex:
"""
        :math:`H_2` potential from T-G. Waech and R.B. Bernstein, J. Chem. Phys. 46 (1967) 4905.
        This is an accurate fit to the Kolos and Wolniewicz potential curve;
        the representation is valid from :math:`R = 0.4` to infinity.
        It was used in the old ECS calculation in
        Julia Turner and C. William McCurdy, Chemical Physics 71 (1982) 127-133,
        for resonances in dissociation with :math:`j \\neq 0`
Note:
ECS contour must begin beyond :math:`r = 9.5 a_0` for safe analytic continuation
Args:
r (complex): FEM-DVR point where this potential is evaluated at
time (int): Time dependence of this potential to simulate
turning on a field pertubation, for example. Defaults to
t=0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
a_vec = [
-3.7623236364e-3,
1.4291725467e-2,
-2.6491493104e-2,
3.0802158643e-2,
-2.4414431427e-2,
1.2072690633e-2,
1.0669803453e-2,
-3.1351262502e-2,
-2.4593504473e-2,
9.0968827782e-2,
8.0055110345e-2,
-2.2685375608e-1,
-1.4912492825e-1,
3.9041633873e-1,
1.7916153661e-1,
-4.7291514961e-1,
-1.4317771747e-1,
4.1382169150e-1,
7.3590396723e-2,
-2.6524118029e-1,
-1.9970631183e-2,
1.2463802250e-1,
-1.2491070013e-3,
-4.2434523716e-2,
3.4575120517e-3,
1.0180959606e-2,
-1.4411614262e-3,
-1.6314090918e-3,
3.1362830316e-4,
1.5666712172e-4,
-3.6848921690e-5,
-6.8198927741e-6,
1.8540052417e-6,
]
# from Hirshfelder and Lowdin
        # Hirshfelder and Lowdin corrected values in 1965: long-range tail -C6/r^6 - C8/r^8
C6 = 6.499026
C8 = 124.395
        # Chan and Dalgarno give their values in Rydbergs, evidently. This is from the paper cited by Bernstein above
C10 = 6571.0 / 2.0
# print("length of a_vec = ",len(a_vec))
#
H_Mass = 1.0078
Daltons_to_eMass = 1822.89
mu = (H_Mass / 2.0) * Daltons_to_eMass
if np.real(r) >= 0.4 and np.real(r) <= 9.5:
vsum = 0.0
for n in range(0, 33):
vsum = vsum + a_vec[n] * ((r - 5.0) / 2.5) ** n
# print("n, a_vec,",n," ",'{:.10e}'.format(a_vec[n]))
else:
vsum = -C6 / r ** 6 - C8 / r ** 8 - C10 / r ** 10
# j = 17 is Fig 2 of Turner+McCurdy, E_res = (0.004044878419994 -0.000219496448j) hartrees
j = 17
vpot = vsum + float(j * (j + 1)) / (2.0 * mu * r ** 2)
return vpot
    def V_c_state(self, r: complex, time: float = 0.0) -> complex:
"""
Interpolate computed values using scipy CubicSpline
        A :math:`\\frac{1}{R^4}` tail is added, matching the value and finite-difference
        derivative at :math:`R=5`
Note:
At this point constants are for Lucchese 4/3/2020 calculation:
:math:`c ^4\Sigma_u^-` state of :math:`O_2^+` where the orbitals come
from a SA-MCSCF on the ion using an aug-cc-vTZP basis set. This was for a
cStateDCalc.csv file, which is NOT provided in the released package.
Therefore, this potential is just a scaffold to implement a general
interpolation scheme and shouldn't be experimented with until then.
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
Hartree_to_eV = 27.211386245988 # NIST ref
n_vals = self.r_data.shape[0]
        if self.r_data[0] <= np.real(r) <= self.r_data[n_vals - 1]:
            pot = self.V_vals(r)
if np.real(r) > 5.0:
pot = 20.26002003285 - 85.94654796874 / r ** 4
if np.real(r) < self.r_data[0] and np.real(r) >= 1.5:
pot = self.V_vals(r)
if np.real(r) < 1.5:
print("r out of range in V_c_state ", r)
exit()
        # shift the interpolated value and convert from eV to hartrees
        potential = (pot - 20.26002003285) / Hartree_to_eV
return potential
    def V_Interpolated(self, r: complex, time: float = 0.0) -> complex:
"""
Interpolated values using scipy CubicSpline
Note:
Requires a file of potential values to interpolate in this class's constructor!
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value at the point r at the time t
"""
return self.V_vals(r)
def V_colinear_model(self, r1: complex, r2: complex) -> complex:
"""
Colinear model for two-electron atom
Args:
            r1 (complex): first FEM-DVR point at which this potential is evaluated
            r2 (complex): second FEM-DVR point at which this potential is evaluated
Returns:
pot (complex): potential value at the point r1 and r2
"""
potval = 1.0 / (r1 + r2)
return potval
#
    def V_Coulomb(self, r: complex, time: float = 0.0) -> complex:
"""
Coulomb potential for He or H- one-electron Hamiltonian
Note:
Nuclear charge is set to 2.0
Args:
            r (complex): FEM-DVR point at which this potential is evaluated
            time (float): Time dependence of this potential, to simulate
                turning on a field perturbation, for example. Defaults to
                0.0
Returns:
pot (complex): potential value on the Coulomb tail
"""
        # The nuclear charge should really be passed in, but that would break
        # the simple np.vectorize wrapper created in the constructor.
Znuc = 2.0
pot = -Znuc / r
return pot
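# Usage sketch (not part of the original module): evaluate one of the analytic
# potentials on a grid through its vectorized wrapper; the grid bounds below
# are illustrative.
#
#   import numpy as np
#   pot = Potential()  # no file given, so only the analytic potentials work
#   r_grid = np.linspace(0.5, 10.0, 200)
#   v_vals = pot.vectorized_V_morse_1(r_grid)  # ndarray, same shape as r_grid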
|
from django.shortcuts import render
from bokeh.plotting import figure, output_file, show
from django.http import HttpResponse
from django.urls import reverse
from bokeh.resources import CDN
from bokeh.embed import components
import math
# Create your views here.
def index(request):
if request.method == "GET":
return render(request, 'plotter/index.html')
#return HttpResponse(reverse('plotter:index'))
elif request.method == "POST":
        domain = request.POST['domain'].split()
        eqn = request.POST['equation']
        domain = range(int(domain[0]), int(domain[1]))
        y = [eval(eqn) for x in domain]  # see the hardening sketch below
title = 'y = ' + eqn
        plot = figure(title=title, x_axis_label='X-Axis', y_axis_label='Y-Axis', plot_width=400, plot_height=400)
        plot.line(domain, y, legend='f(x)', line_width=2)
script, div = components(plot)
return render(request, 'plotter/index.html', {'script' : script , 'div' : div} )
else:
pass
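# Hardening sketch (not part of the original view): eval() on raw user input can
# execute arbitrary code. A common mitigation is to evaluate with empty builtins
# and a whitelist of math names. `safe_eval` and `_ALLOWED` are illustrative
# names, and this reduces, but does not eliminate, the risk of hostile input.
import math

_ALLOWED = {name: getattr(math, name)
            for name in ('sin', 'cos', 'tan', 'exp', 'log', 'sqrt', 'pi', 'e')}

def safe_eval(eqn, x):
    namespace = dict(_ALLOWED, x=x)
    # An empty __builtins__ mapping blocks open(), __import__(), and friends.
    return eval(eqn, {'__builtins__': {}}, namespace)

# Inside the POST branch one would then write:
#   y = [safe_eval(eqn, x) for x in domain]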
|
'''
Created on Oct 27, 2018
@author: nilson.nieto
'''
print("Int values")
a=12
b=231
c=2323
print(a,b,c)
print("Float point")
x=1.23
y=23.01
print(x,y)
print("Complex")
d = 3+ 2j
print(d)
print("Type conversion")
print("Binary ",bin(10))
print("Float",float("2.34"))
print("Hex",hex(18))
|
#Creating and Manipulating PDFs with pdfrw
# https://www.blog.pythonlibrary.org/2018/06/06/creating-and-manipulating-pdfs-with-pdfrw/
# Extract certain types of information from a PDF
# Splitting PDFs
# Merging / Concatenating PDFs
# Rotating pages
# Creating overlays or watermarks
# Scaling pages
# Combining the use of pdfrw and ReportLab
import base64
import tempfile
from pdf2docx import Converter
import streamlit as st
from pdf2image import convert_from_path
from pathlib import Path
def show_pdf(file_path:str):
with open(file_path, "rb") as f:
base64_pdf = base64.b64encode(f.read()).decode("utf-8")
pdf_display = f'<embed src="data:application/pdf;base64,{base64_pdf}" width="100%" height="1000" type="application/pdf">'
st.markdown(pdf_display, unsafe_allow_html=True)
def show_path(file_path:str):
with open(file_path, "rb") as f:
base64_file = base64.b64encode(f.read()).decode("utf-8")
return base64_file
def pdf_tmp_file(uploaded_file):
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        st.markdown("## Original PDF file")
        fp = Path(tmp_file.name)
        fp.write_bytes(uploaded_file.getvalue())
        st.write(tmp_file.name)
        show_pdf(tmp_file.name)
        # pdf2docx writes the .docx to disk and returns None, so track the
        # output path explicitly instead of the return value.
        docx_path = tmp_file.name + ".docx"
        converter = Converter(tmp_file.name)
        converter.convert(docx_path, start=0, end=None)
        converter.close()
        st.markdown("Converted .docx from PDF")
        st.write(docx_path)
def main():
"""Streamlit application
"""
st.title("PDF file uplodaer")
uploaded_file = st.file_uploader("Choose your .pdf file", type="pdf")
    if uploaded_file is not None:
        # Make a temp file path from the uploaded file
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            st.markdown("## Original PDF file")
            fp = Path(tmp_file.name)
            fp.write_bytes(uploaded_file.getvalue())
            st.write(tmp_file.name)
            show_pdf(tmp_file.name)
            # Convert the PDF to .docx; pdf2docx writes the output file to
            # disk, so read the output path back for the download button.
            docx_path = tmp_file.name + ".docx"
            converter = Converter(tmp_file.name)
            converter.convert(docx_path, start=0, end=None)
            converter.close()
            st.markdown("Converted .docx from PDF")
            st.write(docx_path)
            with open(docx_path, "rb") as f:
                st.download_button('Download .docx file', f.read())
if __name__ == "__main__":
main()
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
from oslotest import mockpatch
from tempest import auth
from tempest import config
from tempest import exceptions
from tempest.services.identity.json import token_client as v2_client
from tempest.services.identity.v3.json import token_client as v3_client
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
def fake_get_credentials(fill_in=True, identity_version='v2', **kwargs):
return fake_credentials.FakeCredentials()
class BaseAuthTestsSetUp(base.TestCase):
_auth_provider_class = None
credentials = fake_credentials.FakeCredentials()
def _auth(self, credentials, **params):
"""
        Return an auth provider instance for the given credentials.
"""
return self._auth_provider_class(credentials, **params)
def setUp(self):
super(BaseAuthTestsSetUp, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(auth, 'get_credentials', fake_get_credentials)
self.auth_provider = self._auth(self.credentials)
class TestBaseAuthProvider(BaseAuthTestsSetUp):
"""
    This tests the auth.AuthProvider class, which is the base for the others,
    so we obviously don't test unimplemented methods or the ones which
    strongly depend on them.
"""
class FakeAuthProviderImpl(auth.AuthProvider):
        def _decorate_request(self):
            pass
        def _fill_credentials(self):
            pass
        def _get_auth(self):
            pass
        def base_url(self):
            pass
        def is_expired(self):
            pass
_auth_provider_class = FakeAuthProviderImpl
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
def test_auth_data_property_when_cache_exists(self):
self.auth_provider.cache = 'foo'
self.useFixture(mockpatch.PatchObject(self.auth_provider,
'is_expired',
return_value=False))
self.assertEqual('foo', getattr(self.auth_provider, 'auth_data'))
def test_delete_auth_data_property_through_deleter(self):
self.auth_provider.cache = 'foo'
del self.auth_provider.auth_data
self.assertIsNone(self.auth_provider.cache)
def test_delete_auth_data_property_through_clear_auth(self):
self.auth_provider.cache = 'foo'
self.auth_provider.clear_auth()
self.assertIsNone(self.auth_provider.cache)
def test_set_and_reset_alt_auth_data(self):
self.auth_provider.set_alt_auth_data('foo', 'bar')
self.assertEqual(self.auth_provider.alt_part, 'foo')
self.assertEqual(self.auth_provider.alt_auth_data, 'bar')
self.auth_provider.reset_alt_auth_data()
self.assertIsNone(self.auth_provider.alt_part)
self.assertIsNone(self.auth_provider.alt_auth_data)
def test_auth_class(self):
self.assertRaises(TypeError,
auth.AuthProvider,
fake_credentials.FakeCredentials)
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
_endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
_auth_provider_class = auth.KeystoneV2AuthProvider
credentials = fake_credentials.FakeKeystoneV2Credentials()
def setUp(self):
super(TestKeystoneV2AuthProvider, self).setUp()
self.stubs.Set(v2_client.TokenClientJSON, 'raw_request',
fake_identity._fake_v2_response)
self.target_url = 'test_api'
def _get_fake_alt_identity(self):
return fake_identity.ALT_IDENTITY_V2_RESPONSE['access']
def _get_result_url_from_endpoint(self, ep, endpoint_type='publicURL',
replacement=None):
if replacement:
return ep[endpoint_type].replace('v2', replacement)
return ep[endpoint_type]
def _get_token_from_fake_identity(self):
return fake_identity.TOKEN
def _get_from_fake_identity(self, attr):
access = fake_identity.IDENTITY_V2_RESPONSE['access']
if attr == 'user_id':
return access['user']['id']
elif attr == 'tenant_id':
return access['token']['tenant']['id']
def _test_request_helper(self, filters, expected):
url, headers, body = self.auth_provider.auth_request('GET',
self.target_url,
filters=filters)
self.assertEqual(expected['url'], url)
self.assertEqual(expected['token'], headers['X-Auth-Token'])
self.assertEqual(expected['body'], body)
def _auth_data_with_expiry(self, date_as_string):
token, access = self.auth_provider.auth_data
access['token']['expires'] = date_as_string
return token, access
def test_request(self):
filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'FakeRegion'
}
url = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][1]) + '/' + self.target_url
expected = {
'body': None,
'url': url,
'token': self._get_token_from_fake_identity(),
}
self._test_request_helper(filters, expected)
def test_request_with_alt_auth_cleans_alt(self):
self.auth_provider.set_alt_auth_data(
'body',
(fake_identity.ALT_TOKEN, self._get_fake_alt_identity()))
self.test_request()
        # Assert that the alt auth data is cleared after the request
self.assertIsNone(self.auth_provider.alt_part)
self.assertIsNone(self.auth_provider.alt_auth_data)
def test_request_with_alt_part_without_alt_data(self):
"""
Assert that when alt_part is defined, the corresponding original
request element is kept the same.
"""
filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'fakeRegion'
}
self.auth_provider.set_alt_auth_data('url', None)
url, headers, body = self.auth_provider.auth_request('GET',
self.target_url,
filters=filters)
self.assertEqual(url, self.target_url)
self.assertEqual(self._get_token_from_fake_identity(),
headers['X-Auth-Token'])
        self.assertIsNone(body)
def test_request_with_bad_service(self):
filters = {
'service': 'BAD_SERVICE',
'endpoint_type': 'publicURL',
'region': 'fakeRegion'
}
self.assertRaises(exceptions.EndpointNotFound,
self.auth_provider.auth_request, 'GET',
self.target_url, filters=filters)
def test_request_without_service(self):
filters = {
'service': None,
'endpoint_type': 'publicURL',
'region': 'fakeRegion'
}
self.assertRaises(exceptions.EndpointNotFound,
self.auth_provider.auth_request, 'GET',
self.target_url, filters=filters)
def test_check_credentials_missing_attribute(self):
for attr in ['username', 'password']:
cred = copy.copy(self.credentials)
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred))
def test_fill_credentials(self):
self.auth_provider.fill_credentials()
creds = self.auth_provider.credentials
for attr in ['user_id', 'tenant_id']:
self.assertEqual(self._get_from_fake_identity(attr),
getattr(creds, attr))
def _test_base_url_helper(self, expected_url, filters,
auth_data=None):
url = self.auth_provider.base_url(filters, auth_data)
self.assertEqual(url, expected_url)
def test_base_url(self):
self.filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'FakeRegion'
}
expected = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][1])
self._test_base_url_helper(expected, self.filters)
def test_base_url_to_get_admin_endpoint(self):
self.filters = {
'service': 'compute',
'endpoint_type': 'adminURL',
'region': 'FakeRegion'
}
expected = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][1], endpoint_type='adminURL')
self._test_base_url_helper(expected, self.filters)
def test_base_url_unknown_region(self):
"""
        Assert that if the region is unknown, the first endpoint is returned.
"""
self.filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'AintNoBodyKnowThisRegion'
}
expected = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][0])
self._test_base_url_helper(expected, self.filters)
def test_base_url_with_non_existent_service(self):
self.filters = {
'service': 'BAD_SERVICE',
'endpoint_type': 'publicURL',
'region': 'FakeRegion'
}
self.assertRaises(exceptions.EndpointNotFound,
self._test_base_url_helper, None, self.filters)
def test_base_url_without_service(self):
self.filters = {
'endpoint_type': 'publicURL',
'region': 'FakeRegion'
}
self.assertRaises(exceptions.EndpointNotFound,
self._test_base_url_helper, None, self.filters)
def test_base_url_with_api_version_filter(self):
self.filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'FakeRegion',
'api_version': 'v12'
}
expected = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][1], replacement='v12')
self._test_base_url_helper(expected, self.filters)
def test_base_url_with_skip_path_filter(self):
self.filters = {
'service': 'compute',
'endpoint_type': 'publicURL',
'region': 'FakeRegion',
'skip_path': True
}
expected = 'http://fake_url/'
self._test_base_url_helper(expected, self.filters)
def test_token_not_expired(self):
expiry_data = datetime.datetime.utcnow() + datetime.timedelta(days=1)
auth_data = self._auth_data_with_expiry(
expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
self.assertFalse(self.auth_provider.is_expired(auth_data))
def test_token_expired(self):
expiry_data = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
auth_data = self._auth_data_with_expiry(
expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
self.assertTrue(self.auth_provider.is_expired(auth_data))
def test_token_not_expired_to_be_renewed(self):
expiry_data = datetime.datetime.utcnow() + \
self.auth_provider.token_expiry_threshold / 2
auth_data = self._auth_data_with_expiry(
expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
self.assertTrue(self.auth_provider.is_expired(auth_data))
class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
_endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
_auth_provider_class = auth.KeystoneV3AuthProvider
credentials = fake_credentials.FakeKeystoneV3Credentials()
def setUp(self):
super(TestKeystoneV3AuthProvider, self).setUp()
self.stubs.Set(v3_client.V3TokenClientJSON, 'raw_request',
fake_identity._fake_v3_response)
def _get_fake_alt_identity(self):
return fake_identity.ALT_IDENTITY_V3['token']
def _get_result_url_from_endpoint(self, ep, replacement=None):
if replacement:
return ep['url'].replace('v3', replacement)
return ep['url']
def _auth_data_with_expiry(self, date_as_string):
token, access = self.auth_provider.auth_data
access['expires_at'] = date_as_string
return token, access
def _get_from_fake_identity(self, attr):
token = fake_identity.IDENTITY_V3_RESPONSE['token']
if attr == 'user_id':
return token['user']['id']
elif attr == 'project_id':
return token['project']['id']
elif attr == 'user_domain_id':
return token['user']['domain']['id']
elif attr == 'project_domain_id':
return token['project']['domain']['id']
def test_check_credentials_missing_attribute(self):
# reset credentials to fresh ones
self.credentials.reset()
for attr in ['username', 'password', 'user_domain_name',
'project_domain_name']:
cred = copy.copy(self.credentials)
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred),
"Credentials should be invalid without %s" % attr)
def test_check_domain_credentials_missing_attribute(self):
# reset credentials to fresh ones
self.credentials.reset()
domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
for attr in ['username', 'password', 'user_domain_name']:
cred = copy.copy(domain_creds)
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred),
"Credentials should be invalid without %s" % attr)
def test_fill_credentials(self):
self.auth_provider.fill_credentials()
creds = self.auth_provider.credentials
for attr in ['user_id', 'project_id', 'user_domain_id',
'project_domain_id']:
self.assertEqual(self._get_from_fake_identity(attr),
getattr(creds, attr))
    # Overrides the v2 test
def test_base_url_to_get_admin_endpoint(self):
self.filters = {
'service': 'compute',
'endpoint_type': 'admin',
'region': 'MiddleEarthRegion'
}
expected = self._get_result_url_from_endpoint(
self._endpoints[0]['endpoints'][2])
self._test_base_url_helper(expected, self.filters)
|
import pytest
from lhotse.cut import CutSet
from lhotse.dataset.speech_synthesis import SpeechSynthesisDataset
@pytest.fixture
def cut_set():
return CutSet.from_json('test/fixtures/ljspeech/cuts.json')
def test_speech_synthesis_dataset(cut_set):
dataset = SpeechSynthesisDataset(cut_set)
example = dataset[0]
assert example['audio'].shape[1] > 0
assert example['features'].shape[0] > 0
assert len(example['tokens']) > 0
|
# Generated by Django 2.2.8 on 2019-12-04 16:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reputation', '0010_auto_20191204_1515'),
]
operations = [
migrations.RemoveField(
model_name='withdrawal',
name='amount_decimal_part',
),
migrations.RemoveField(
model_name='withdrawal',
name='amount_integer_part',
),
]
|
from setuptools import setup, find_packages
setup(
name='celery_zabbix',
version='1.0.5.pirdev1',
author='Zeit Online',
author_email='zon-backend@zeit.de',
url='https://github.com/zeitonline/celery_zabbix',
description="Sends task execution metrics to Zabbix",
long_description='\n\n'.join(
open(x).read() for x in ['README.rst', 'CHANGES.txt']),
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
install_requires=[
'celery',
'scales',
'setuptools',
'zbxsend',
],
extras_require={'test': [
'mock',
'pytest',
]},
entry_points={
'celery.commands': [
'zabbix = celery_zabbix.receiver:Command',
]
}
)
|
import requests
class ChuckTester:
def __init__(self):
self.chuck_endpoint = "http://127.0.0.1:8123"
self.endpoint = "https://test.net"
self.s = requests.Session()
self.s.proxies = {'https': 'https://127.0.0.1:8123'}
self.s.verify = False
def activate_scenario(self, scenario, id):
resp = requests.put(self.chuck_endpoint + "/scenario/" + scenario + "/" + id + "/no")
self.s.headers = {'automation-test-identifier': id}
# print("Activate scenario {}, result {}".format(scenario, resp))
# scenario 1
def auth_init(self):
resp = self.s.get(self.endpoint + "/v1/authinit?format=json&apikey=1234567&code=7654321")
if resp.status_code == 200:
return resp.json()
else:
return None
def verify(self):
resp = self.s.get(self.endpoint + "/v1/verify?verifier=1a2b3c4d")
if resp.status_code == 200:
return resp.json()
else:
return None
def login(self):
resp = self.s.post(self.endpoint + "/v1/login?format=json&apikey=987654")
if resp.status_code == 200:
return resp.json()
else:
return None
# scenario 2
def guest_auth(self):
resp = self.s.post(self.endpoint + "/v2/authtoken/guest?format=json&apikey=1234")
return resp.status_code
def delete_token(self):
resp = self.s.delete(self.endpoint + "/v1/authtoken/user-id-111?format=json&apikey=2345")
return resp.status_code
# scenario 3
def auth_init_sc3(self):
resp = self.s.get(self.endpoint + "/v1/authinit?format=json&apikey=1234567&code=7654321")
if resp.status_code == 200:
return resp.json()
else:
return None
def preferred_store(self):
resp = self.s.get(self.endpoint + "/v1/customer/profile/preferredstore?format=json&apikey=9878")
if resp.status_code == 200:
return resp.json()
else:
return None
# invalid requests
def wrong_path(self):
resp = self.s.post(self.endpoint + "/v2/login?format=json&apikey=987654")
return resp.status_code
def wrong_query(self):
resp = self.s.post(self.endpoint + "/v1/login?format=xml&apikey=987654")
return resp.status_code
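# A minimal usage sketch (an assumption, not part of the original file): drive
# one scenario end to end against a local Chuck proxy on 127.0.0.1:8123. The
# scenario name "auth" and the id "test-001" are hypothetical placeholders.
if __name__ == '__main__':
    tester = ChuckTester()
    tester.activate_scenario("auth", "test-001")
    print(tester.auth_init())
    print(tester.verify())
    print(tester.login())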
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import codecs
import csv
import json
import ntpath
import os
import posixpath
import re
import shutil
import string
import sys
from distutils.dir_util import copy_tree
from itertools import zip_longest
from attributecode import CRITICAL
from attributecode import WARNING
from attributecode import Error
on_windows = 'win32' in sys.platform
# boolean field name
boolean_fields = ['redistribute', 'attribute', 'track_change', 'modified', 'internal_use_only']
file_fields = ['about_resource', 'notice_file', 'changelog_file', 'author_file']
def to_posix(path):
"""
Return a path using the posix path separator given a path that may contain
posix or windows separators, converting "\\" to "/". NB: this path will
still be valid in the windows explorer (except for a UNC or share name). It
will be a valid path everywhere in Python. It will not be valid for windows
command line operations.
"""
return path.replace(ntpath.sep, posixpath.sep)
UNC_PREFIX = u'\\\\?\\'
UNC_PREFIX_POSIX = to_posix(UNC_PREFIX)
UNC_PREFIXES = (UNC_PREFIX_POSIX, UNC_PREFIX,)
valid_file_chars = string.digits + string.ascii_letters + '_-.+()~[]{}|@' + ' '
def invalid_chars(path):
"""
Return a list of invalid characters in the file name of `path`.
"""
path = to_posix(path)
rname = resource_name(path)
name = rname.lower()
return [c for c in name if c not in valid_file_chars]
def check_file_names(paths):
"""
Given a sequence of file paths, check that file names are valid and that
there are no case-insensitive duplicates in any given directories.
Return a list of errors.
    From the spec:
A file name can contain only these US-ASCII characters:
- digits from 0 to 9
- uppercase and lowercase letters from A to Z
- the _ underscore, - dash and . period signs.
    From the spec:
The case of a file name is not significant. On case-sensitive file
systems (such as Linux), a tool must raise an error if two ABOUT files
stored in the same directory have the same lowercase file name.
"""
    # FIXME: this should be a defaultdict that accumulates all duplicated paths
seen = {}
errors = []
for orig_path in paths:
path = orig_path
invalid = invalid_chars(path)
if invalid:
invalid = ''.join(invalid)
msg = ('Invalid characters %(invalid)r in file name at: '
'%(path)r' % locals())
errors.append(Error(CRITICAL, msg))
path = to_posix(orig_path)
name = resource_name(path).lower()
parent = posixpath.dirname(path)
path = posixpath.join(parent, name)
path = posixpath.normpath(path)
path = posixpath.abspath(path)
existing = seen.get(path)
if existing:
msg = ('Duplicate files: %(orig_path)r and %(existing)r '
'have the same case-insensitive file name' % locals())
errors.append(Error(CRITICAL, msg))
else:
seen[path] = orig_path
return errors
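# For example (a sketch of the checks above, not from the original code):
# check_file_names(['dir/Foo.ABOUT', 'dir/foo.about']) yields one CRITICAL
# duplicate-file error, and check_file_names(['dir/bad*name.ABOUT']) yields
# one CRITICAL invalid-character error, because '*' is not in valid_file_chars.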
def wrap_boolean_value(context):
    """
    Wrap the value of boolean fields in quotes.
    """
    updated_context = ''
    for line in context.splitlines():
        key = line.partition(':')[0]
        value = line.partition(':')[2].strip()
        if key in boolean_fields and value:
            updated_context += key + ': "' + value + '"\n'
        else:
            updated_context += line + '\n'
    return updated_context
def replace_tab_with_spaces(context):
    """
    Replace each tab with 4 spaces.
    """
    updated_context = ''
    for line in context.splitlines():
        updated_context += line.replace('\t', '    ') + '\n'
    return updated_context
# TODO: rename to normalize_path
def get_absolute(location):
"""
Return an absolute normalized location.
"""
location = os.path.expanduser(location)
location = os.path.expandvars(location)
location = os.path.normpath(location)
location = os.path.abspath(location)
return location
def get_locations(location):
"""
Return a list of locations of files given the `location` of a
a file or a directory tree containing ABOUT files.
File locations are normalized using posix path separators.
"""
location = add_unc(location)
location = get_absolute(location)
assert os.path.exists(location)
if os.path.isfile(location):
yield location
else:
for base_dir, _, files in os.walk(location):
for name in files:
bd = to_posix(base_dir)
yield posixpath.join(bd, name)
def get_about_locations(location):
"""
Return a list of locations of ABOUT files given the `location` of a
a file or a directory tree containing ABOUT files.
File locations are normalized using posix path separators.
"""
for loc in get_locations(location):
if is_about_file(loc):
yield loc
def norm(p):
    """
    Normalize the path.
    """
    # str.strip() treats its argument as a set of characters, not a prefix;
    # slice off the UNC prefix instead so only the actual prefix is removed.
    if p.startswith(UNC_PREFIX):
        p = p[len(UNC_PREFIX):]
    elif p.startswith(UNC_PREFIX_POSIX):
        p = p[len(UNC_PREFIX_POSIX):]
    p = to_posix(p)
    p = p.strip(posixpath.sep)
    p = posixpath.normpath(p)
    return p
def get_relative_path(base_loc, full_loc):
"""
Return a posix path for a given full location relative to a base location.
    The first segment of the difference between full_loc and base_loc will
    become the first segment of the returned path.
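    For example (matching the code below), base_loc='/tmp/base' and
    full_loc='/tmp/base/a/b.ABOUT' return 'a/b.ABOUT'.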
"""
base = norm(base_loc)
path = norm(full_loc)
assert path.startswith(base), ('Cannot compute relative path: '
'%(path)r does not start with %(base)r'
% locals())
base_name = resource_name(base)
no_dir = base == base_name
same_loc = base == path
if same_loc:
# this is the case of a single file or single dir
if no_dir:
# we have no dir: the full path is the same as the resource name
relative = base_name
else:
# we have at least one dir
parent_dir = posixpath.dirname(base)
parent_dir = resource_name(parent_dir)
relative = posixpath.join(parent_dir, base_name)
else:
relative = path[len(base) + 1:]
# We don't want to keep the first segment of the root of the returned path.
# See https://github.com/nexB/attributecode/issues/276
# relative = posixpath.join(base_name, relative)
return relative
def to_native(path):
"""
Return a path using the current OS path separator given a path that may
contain posix or windows separators, converting "/" to "\\" on windows
and "\\" to "/" on posix OSes.
"""
path = path.replace(ntpath.sep, os.path.sep)
path = path.replace(posixpath.sep, os.path.sep)
return path
def is_about_file(path):
"""
Return True if the path represents a valid ABOUT file name.
"""
if path:
path = path.lower()
return path.endswith('.about') and path != '.about'
def resource_name(path):
"""
Return the file or directory name from a path.
"""
path = path.strip()
path = to_posix(path)
path = path.rstrip(posixpath.sep)
_left, right = posixpath.split(path)
return right.strip()
def load_csv(location):
"""
Read CSV at `location`, return a list of ordered dictionaries, one
for each row.
"""
results = []
# FIXME: why ignore encoding errors here?
with codecs.open(location, mode='rb', encoding='utf-8-sig',
errors='ignore') as csvfile:
for row in csv.DictReader(csvfile):
# convert all the column keys to lower case
updated_row = {key.lower(): value for key, value in row.items()}
results.append(updated_row)
return results
def load_json(location):
"""
Read JSON file at `location` and return a list of ordered dicts, one for
each entry.
"""
with open(location) as json_file:
results = json.load(json_file)
    # FIXME: this is too clever and complex... IMHO we should not try to guess the format.
    # Instead, a command line option should be provided explicitly to say what the format is.
if isinstance(results, list):
results = sorted(results)
else:
if u'aboutcode_manager_notice' in results:
results = results['components']
elif u'scancode_notice' in results:
results = results['files']
else:
results = [results]
return results
# FIXME: rename to is_online: BUT do we really need this at all????
def have_network_connection():
"""
Return True if an HTTP connection to some public web site is possible.
"""
import socket
import http.client as httplib
http_connection = httplib.HTTPConnection('dejacode.org', timeout=10) # NOQA
try:
http_connection.connect()
except socket.error:
return False
else:
return True
def extract_zip(location):
"""
Extract a zip file at location in a temp directory and return the temporary
directory where the archive was extracted.
"""
import zipfile
import tempfile
if not zipfile.is_zipfile(location):
raise Exception('Incorrect zip file %(location)r' % locals())
archive_base_name = os.path.basename(location).replace('.zip', '')
base_dir = tempfile.mkdtemp(prefix='aboutcode-toolkit-extract-')
target_dir = os.path.join(base_dir, archive_base_name)
target_dir = add_unc(target_dir)
os.makedirs(target_dir)
if target_dir.endswith((ntpath.sep, posixpath.sep)):
target_dir = target_dir[:-1]
with zipfile.ZipFile(location) as zipf:
for info in zipf.infolist():
name = info.filename
content = zipf.read(name)
target = os.path.join(target_dir, name)
is_dir = target.endswith((ntpath.sep, posixpath.sep))
if is_dir:
target = target[:-1]
parent = os.path.dirname(target)
if on_windows:
target = target.replace(posixpath.sep, ntpath.sep)
parent = parent.replace(posixpath.sep, ntpath.sep)
if not os.path.exists(parent):
os.makedirs(add_unc(parent))
if not content and is_dir:
if not os.path.exists(target):
os.makedirs(add_unc(target))
if not os.path.exists(target):
with open(target, 'wb') as f:
f.write(content)
return target_dir
def add_unc(location):
"""
Convert a `location` to an absolute Window UNC path to support long paths on
Windows. Return the location unchanged if not on Windows. See
https://msdn.microsoft.com/en-us/library/aa365247.aspx
"""
if on_windows and not location.startswith(UNC_PREFIX):
if location.startswith(UNC_PREFIX_POSIX):
return UNC_PREFIX + os.path.abspath(location.strip(UNC_PREFIX_POSIX))
return UNC_PREFIX + os.path.abspath(location)
return location
# FIXME: add docstring
def copy_license_notice_files(fields, base_dir, reference_dir, afp):
"""
    Given a list of (key, value) `fields` tuples, a `base_dir` where ABOUT
    files and their companion license files are stored, an extra
    `reference_dir` where reference license and notice files are stored, and
    the `afp` about_file_path value, copy to the base_dir the license_file or
    notice_file if found in the reference_dir.
"""
errors = []
copy_file_name = ''
for key, value in fields:
if key == 'license_file' or key == 'notice_file':
if value:
                # This is to handle multiple license_file values in CSV format.
                # The following code will construct a list to contain the
                # license file(s) that need to be copied.
                # Note that *ONLY* the license_file field allows \n. Other file
                # fields that contain \n will prompt an error at the validation stage.
file_list = []
if '\n' in value:
f_list = value.split('\n')
else:
if not isinstance(value, list):
f_list = [value]
else:
f_list = value
# The following code is to adopt the approach from #404
# to use comma for multiple files which refer the same license
for item in f_list:
if ',' in item:
item_list = item.split(',')
for i in item_list:
file_list.append(i.strip())
else:
file_list.append(item)
else:
continue
for copy_file_name in file_list:
from_lic_path = posixpath.join(to_posix(reference_dir), copy_file_name)
about_file_dir = os.path.dirname(to_posix(afp)).lstrip('/')
to_lic_path = posixpath.join(to_posix(base_dir), about_file_dir)
if not os.path.exists(posixpath.join(to_lic_path, copy_file_name)):
err = copy_file(from_lic_path, to_lic_path)
if err:
errors.append(err)
return errors
def copy_file(from_path, to_path):
error = ''
# Return if the from_path is empty or None.
if not from_path:
return
if on_windows:
if not from_path.startswith(UNC_PREFIXES):
from_path = add_unc(from_path)
if not to_path.startswith(UNC_PREFIXES):
to_path = add_unc(to_path)
# Strip the white spaces
from_path = from_path.strip()
to_path = to_path.strip()
# Errors will be captured when doing the validation
if not os.path.exists(from_path):
return ''
if not posixpath.exists(to_path):
os.makedirs(to_path)
try:
if os.path.isdir(from_path):
# Copy the whole directory structure
if from_path.endswith('/'):
from_path = from_path.rpartition('/')[0]
folder_name = os.path.basename(from_path)
to_path = os.path.join(to_path, folder_name)
if os.path.exists(to_path):
                msg = to_path + ' already exists and is replaced by ' + from_path
error = Error(WARNING, msg)
copy_tree(from_path, to_path)
else:
file_name = os.path.basename(from_path)
to_file_path = os.path.join(to_path, file_name)
if os.path.exists(to_file_path):
                msg = to_file_path + ' already exists and is replaced by ' + from_path
error = Error(WARNING, msg)
shutil.copy2(from_path, to_path)
return error
except Exception as e:
msg = 'Cannot copy file at %(from_path)r.' % locals()
error = Error(CRITICAL, msg)
return error
# FIXME: we should use a license object instead
def ungroup_licenses(licenses):
"""
Ungroup multiple licenses information
"""
lic_key = []
lic_name = []
lic_file = []
lic_url = []
for lic in licenses:
if 'key' in lic:
lic_key.append(lic['key'])
if 'name' in lic:
lic_name.append(lic['name'])
if 'file' in lic:
lic_file.append(lic['file'])
if 'url' in lic:
lic_url.append(lic['url'])
return lic_key, lic_name, lic_file, lic_url
# FIXME: add docstring
def format_about_dict_for_csv_output(about_dictionary_list):
csv_formatted_list = []
for element in about_dictionary_list:
row_list = dict()
for key in element:
if element[key]:
if isinstance(element[key], list):
row_list[key] = u'\n'.join((element[key]))
elif key == u'about_resource':
row_list[key] = u'\n'.join((element[key].keys()))
else:
row_list[key] = element[key]
csv_formatted_list.append(row_list)
return csv_formatted_list
# FIXME: add docstring
def format_about_dict_for_json_output(about_dictionary_list):
licenses = ['license_key', 'license_name', 'license_file', 'license_url']
json_formatted_list = []
for element in about_dictionary_list:
row_list = dict()
        # FIXME: avoid using parallel lists... use an object instead
license_key = []
license_name = []
license_file = []
license_url = []
for key in element:
if element[key]:
# The 'about_resource' is an ordered dict
if key == 'about_resource':
row_list[key] = list(element[key].keys())[0]
elif key in licenses:
if key == 'license_key':
license_key = element[key]
elif key == 'license_name':
license_name = element[key]
elif key == 'license_file':
license_file = element[key]
elif key == 'license_url':
license_url = element[key]
else:
row_list[key] = element[key]
# Group the same license information in a list
license_group = list(zip_longest(license_key, license_name, license_file, license_url))
if license_group:
licenses_list = []
for lic_group in license_group:
lic_dict = dict()
if lic_group[0]:
lic_dict['key'] = lic_group[0]
if lic_group[1]:
lic_dict['name'] = lic_group[1]
if lic_group[2]:
lic_dict['file'] = lic_group[2]
if lic_group[3]:
lic_dict['url'] = lic_group[3]
licenses_list.append(lic_dict)
row_list['licenses'] = licenses_list
json_formatted_list.append(row_list)
return json_formatted_list
def unique(sequence):
"""
Return a list of unique items found in sequence. Preserve the original
sequence order.
For example:
>>> unique([1, 5, 3, 5])
[1, 5, 3]
"""
deduped = []
for item in sequence:
if item not in deduped:
deduped.append(item)
return deduped
def filter_errors(errors, minimum_severity=WARNING):
"""
Return a list of unique `errors` Error object filtering errors that have a
severity below `minimum_severity`.
"""
return unique([e for e in errors if e.severity >= minimum_severity])
def create_dir(location):
"""
Create directory or directory tree at location, ensuring it is readable
and writeable.
"""
import stat
if not os.path.exists(location):
os.makedirs(location)
os.chmod(location, stat.S_IRWXU | stat.S_IRWXG
| stat.S_IROTH | stat.S_IXOTH)
def get_temp_dir(sub_dir_path=None):
"""
Create a unique new temporary directory location. Create directories
identified by sub_dir_path if provided in this temporary directory.
Return the location for this unique directory joined with the
sub_dir_path if any.
"""
new_temp_dir = build_temp_dir()
if sub_dir_path:
# create a sub directory hierarchy if requested
new_temp_dir = os.path.join(new_temp_dir, sub_dir_path)
create_dir(new_temp_dir)
return new_temp_dir
def build_temp_dir(prefix='attributecode-'):
"""
Create and return a new unique empty directory created in base_dir.
"""
import tempfile
location = tempfile.mkdtemp(prefix=prefix)
create_dir(location)
return location
"""
Return True if a string s name is safe to use as an attribute name.
"""
is_valid_name = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$').match
|
from collections import namedtuple
import common.numpy_fast as np
from common.realtime import sec_since_boot
from selfdrive.config import CruiseButtons
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.controls.lib.drive_helpers import rate_limit
from common.numpy_fast import clip, interp
def actuator_hystereses(final_brake, braking, brake_steady, v_ego, civic):
# hyst params... TODO: move these to VehicleParams
brake_hyst_on = 0.055 if civic else 0.1 # to activate brakes exceed this value
brake_hyst_off = 0.005 # to deactivate brakes below this value
  brake_hyst_gap = 0.01 # don't change brake command for small oscillations within this value
  #*** hysteresis logic to avoid brake blinking. go above 0.1 to trigger
if (final_brake < brake_hyst_on and not braking) or final_brake < brake_hyst_off:
final_brake = 0.
braking = final_brake > 0.
# for small brake oscillations within brake_hyst_gap, don't change the brake command
if final_brake == 0.:
brake_steady = 0.
elif final_brake > brake_steady + brake_hyst_gap:
brake_steady = final_brake - brake_hyst_gap
elif final_brake < brake_steady - brake_hyst_gap:
brake_steady = final_brake + brake_hyst_gap
final_brake = brake_steady
if not civic:
brake_on_offset_v = [.25, .15] # min brake command on brake activation. below this no decel is perceived
brake_on_offset_bp = [15., 30.] # offset changes VS speed to not have too abrupt decels at high speeds
# offset the brake command for threshold in the brake system. no brake torque perceived below it
brake_on_offset = interp(v_ego, brake_on_offset_bp, brake_on_offset_v)
brake_offset = brake_on_offset - brake_hyst_on
if final_brake > 0.0:
final_brake += brake_offset
return final_brake, braking, brake_steady
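# A quick sanity sketch of the hysteresis above (illustration, not original
# code): on a non-Civic with braking=False, a command of 0.08 is zeroed
# because it is below brake_hyst_on (0.1); once braking is latched, commands
# keep flowing until one falls below brake_hyst_off (0.005).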
class AH:
#[alert_idx, value]
  # See dbc files for info on values
NONE = [0, 0]
FCW = [1, 0x8]
STEER = [2, 1]
BRAKE_PRESSED = [3, 10]
GEAR_NOT_D = [4, 6]
SEATBELT = [5, 5]
SPEED_TOO_HIGH = [6, 8]
def process_hud_alert(hud_alert):
# initialize to no alert
fcw_display = 0
steer_required = 0
acc_alert = 0
if hud_alert == AH.NONE: # no alert
pass
elif hud_alert == AH.FCW: # FCW
fcw_display = hud_alert[1]
elif hud_alert == AH.STEER: # STEER
steer_required = hud_alert[1]
else: # any other ACC alert
acc_alert = hud_alert[1]
return fcw_display, steer_required, acc_alert
import selfdrive.car.honda.hondacan as hondacan
HUDData = namedtuple("HUDData",
["pcm_accel", "v_cruise", "X2", "car", "X4", "X5",
"lanes", "beep", "X8", "chime", "acc_alert"])
class CarController(object):
def __init__(self):
self.braking = False
self.brake_steady = 0.
self.final_brake_last = 0.
# redundant safety check with the board
self.controls_allowed = False
def update(self, sendcan, enabled, CS, frame, final_gas, final_brake, final_steer, \
pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \
hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert, \
snd_beep, snd_chime):
""" Controls thread """
# TODO: Make the accord work.
if CS.accord:
return
# *** apply brake hysteresis ***
final_brake, self.braking, self.brake_steady = actuator_hystereses(final_brake, self.braking, self.brake_steady, CS.v_ego, CS.civic)
# *** no output if not enabled ***
if not enabled:
final_gas = 0.
final_brake = 0.
final_steer = 0.
# send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
if CS.pcm_acc_status:
pcm_cancel_cmd = True
# *** rate limit after the enable check ***
final_brake = rate_limit(final_brake, self.final_brake_last, -2., 1./100)
self.final_brake_last = final_brake
# vehicle hud display, wait for one update from 10Hz 0x304 msg
#TODO: use enum!!
if hud_show_lanes:
hud_lanes = 0x04
else:
hud_lanes = 0x00
# TODO: factor this out better
if enabled:
if hud_show_car:
hud_car = 0xe0
else:
hud_car = 0xd0
else:
hud_car = 0xc0
#print chime, alert_id, hud_alert
fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)
hud = HUDData(int(pcm_accel), int(hud_v_cruise), 0x01, hud_car,
0xc1, 0x41, hud_lanes + steer_required,
int(snd_beep), 0x48, (snd_chime << 5) + fcw_display, acc_alert)
if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):
print "INVALID HUD", hud
hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x41, 0x40, 0, 0x48, 0, 0)
# **** process the car messages ****
# *** compute control surfaces ***
tt = sec_since_boot()
GAS_MAX = 1004
BRAKE_MAX = 1024/4
if CS.crv:
STEER_MAX = 0x300 # CR-V only uses 12-bits and requires a lower value
else:
STEER_MAX = 0xF00
GAS_OFFSET = 328
# steer torque is converted back to CAN reference (positive when steering right)
apply_gas = int(clip(final_gas*GAS_MAX, 0, GAS_MAX-1))
apply_brake = int(clip(final_brake*BRAKE_MAX, 0, BRAKE_MAX-1))
apply_steer = int(clip(-final_steer*STEER_MAX, -STEER_MAX, STEER_MAX))
# no gas if you are hitting the brake or the user is
if apply_gas > 0 and (apply_brake != 0 or CS.brake_pressed):
print "CANCELLING GAS", apply_brake
apply_gas = 0
# no computer brake if the gas is being pressed
if CS.car_gas > 0 and apply_brake != 0:
print "CANCELLING BRAKE"
apply_brake = 0
# any other cp.vl[0x18F]['STEER_STATUS'] is common and can happen during user override. sending 0 torque to avoid EPS sending error 5
if CS.steer_not_allowed:
print "STEER ALERT, TORQUE INHIBITED"
apply_steer = 0
# *** entry into controls state ***
if (CS.prev_cruise_buttons == CruiseButtons.DECEL_SET or CS.prev_cruise_buttons == CruiseButtons.RES_ACCEL) and \
CS.cruise_buttons == 0 and not self.controls_allowed:
print "CONTROLS ARE LIVE"
self.controls_allowed = True
# *** exit from controls state on cancel, gas, or brake ***
if (CS.cruise_buttons == CruiseButtons.CANCEL or CS.brake_pressed or
CS.user_gas_pressed or (CS.pedal_gas > 0 and CS.brake_only)) and self.controls_allowed:
print "CONTROLS ARE DEAD"
self.controls_allowed = False
# *** controls fail on steer error, brake error, or invalid can ***
if CS.steer_error:
print "STEER ERROR"
self.controls_allowed = False
if CS.brake_error:
print "BRAKE ERROR"
self.controls_allowed = False
if not CS.can_valid and self.controls_allowed: # 200 ms
print "CAN INVALID"
self.controls_allowed = False
# Send CAN commands.
can_sends = []
# Send steering command.
if CS.accord:
idx = frame % 2
can_sends.append(hondacan.create_accord_steering_control(apply_steer, idx))
else:
idx = frame % 4
can_sends.extend(hondacan.create_steering_control(apply_steer, CS.crv, idx))
# Send gas and brake commands.
if (frame % 2) == 0:
idx = (frame / 2) % 4
can_sends.append(
hondacan.create_brake_command(apply_brake, pcm_override,
pcm_cancel_cmd, hud.chime, idx))
if not CS.brake_only:
# send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
# This prevents unexpected pedal range rescaling
gas_amount = (apply_gas + GAS_OFFSET) * (apply_gas > 0)
can_sends.append(hondacan.create_gas_command(gas_amount, idx))
# Send dashboard UI commands.
if (frame % 10) == 0:
idx = (frame/10) % 4
can_sends.extend(hondacan.create_ui_commands(pcm_speed, hud, CS.civic, CS.accord, CS.crv, idx))
# radar at 20Hz, but these msgs need to be sent at 50Hz on ilx (seems like an Acura bug)
if CS.civic or CS.accord or CS.crv:
radar_send_step = 5
else:
radar_send_step = 2
if (frame % radar_send_step) == 0:
idx = (frame/radar_send_step) % 4
can_sends.extend(hondacan.create_radar_commands(CS.v_ego, CS.civic, CS.accord, CS.crv, idx))
sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
|
"""
MISSING INDICATOR
- Adding a variable to capture NA
- In previous examples we learnt how to replace missing values with the mean or median, or by extracting a random value.
- In other words, we learnt about mean / median and random sample imputation.
- These methods assume that the data are missing completely at random (MCAR).
- There are other methods that can be used when values are not missing at random, for example arbitrary value imputation or end of distribution imputation.
- However, these imputation techniques will affect the variable distribution dramatically, and are therefore not suitable for linear models.
So what can we do if data are not MCAR and we want to use linear models?
- If data are not missing at random, it is a good idea to replace missing observations by the mean / median / mode AND flag those missing observations as well with a Missing Indicator.
- A Missing Indicator is an additional binary variable, which indicates whether the data was missing for an observation (1) or not (0).
For which variables can I add a missing indicator?
- We can add a missing indicator to both numerical and categorical variables.
Note
- Adding a missing indicator is never used alone.
- On the contrary, it is always used together with another imputation technique, which can be mean / median imputation for numerical variables, or frequent category imputation for categorical variables.
- We can also use random sample imputation together with adding a missing indicator for both categorical and numerical variables.
Commonly used together:
- Mean / median imputation + missing indicator (Numerical variables)
- Frequent category imputation + missing indicator (Categorical variables)
- Random sample Imputation + missing indicator (Numerical and categorical)
Assumptions
- Data is not missing at random
- Missing data are predictive
Advantages
- Easy to implement
- Captures the importance of missing data if there is one
Limitations
- Expands the feature space
- Original variable still needs to be imputed to remove the NaN
- Adding a missing indicator adds one variable per variable with missing data. So if the dataset contains 10 features, and all of them have missing values, after adding the indicators we end up with a dataset of 20 features: the original 10 plus 10 additional binary features that indicate, for each original variable, whether the value was missing. This may not be a problem in datasets with tens to a few hundred variables, but if our original dataset contains thousands of variables, creating an additional variable per NA leaves us with very big datasets.
Important
- In addition, data tend to be missing for the same observation across multiple variables, which often leads to many of the missing indicator variables being similar or identical to each other.
Final note
- Typically, mean / median / mode imputation is done together with adding a variable to capture those observations where the data was missing, thus covering 2 angles: if the data were missing completely at random, the mean / median / mode imputation accounts for it, and if they weren't, the missing indicator captures it.
- Both methods are extremely straightforward to implement, and are therefore a top choice in data science competitions. See for example the winning solution of the KDD 2009 cup: "Winning the KDD Cup Orange Challenge with Ensemble Selection".
Below, let's see examples of how to perform missing indicator imputation on the house price and titanic datasets
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
pd.set_option('display.max_columns', None)
""" Missing indicator on titanic dataset """
# load the Titanic Dataset with a few variables for demonstration
data = pd.read_csv('dataset/titanic.csv', usecols=['age', 'fare', 'survived'])
data.head()
"""
survived age fare
0 1 29.0000 211.3375
1 1 0.9167 151.5500
2 0 2.0000 151.5500
3 0 30.0000 151.5500
4 0 25.0000 151.5500 """
# let's look at the percentage of NA
data.isnull().mean()
"""
survived 0.000000
age 0.200917
fare 0.000764
dtype: float64 """
"""
- To add a binary missing indicator, we don't necessarily need to learn anything from the training set, so in principle we could do this in the original dataset and then separate into train and test.
- However, I do not recommend this practice.
- In addition, if you are using scikit-learn to add the missing indicator, the indicator, as it is designed, needs to learn from the train set which features to impute, that is, which features need the binary variable added (a small self-contained sketch follows right after this note). We will see more about different implementations of missing indicators in future examples.
- For now, let's see how to create a binary missing indicator manually. """
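# A minimal, self-contained sketch of the scikit-learn route mentioned above
# (an assumption: scikit-learn >= 0.20, which ships sklearn.impute.MissingIndicator).
# The indicator learns on the train set which features contain NA and returns
# one binary column per such feature:
from sklearn.impute import MissingIndicator
demo = pd.DataFrame({'age': [20.0, np.nan, 40.0], 'fare': [7.0, 8.0, 9.0]})
indicator = MissingIndicator(features='missing-only')
indicator.fit(demo)  # learns that only 'age' has missing values
print(indicator.transform(demo))  # boolean column for 'age': [[False], [True], [False]]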
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']], # predictors
data['survived'], # target
test_size=0.3, # percentage of obs in test set
random_state=0) # seed to ensure reproducibility
X_train.shape, X_test.shape
# ((916, 2), (393, 2))
# Let's explore the missing data in the train set; the percentages should be fairly similar to those of the whole dataset
X_train.isnull().mean()
""" age 0.191048
fare 0.000000
dtype: float64 """
# add the missing indicator
# this is done very simply by using np.where from numpy and isnull from pandas:
X_train['Age_NA'] = np.where(X_train['age'].isnull(), 1, 0)
X_test['Age_NA'] = np.where(X_test['age'].isnull(), 1, 0)
X_train.head()
"""
age fare Age_NA
501 13.0 19.5000 0
588 4.0 23.0000 0
402 30.0 13.8583 0
1193 NaN 7.7250 1
686 22.0 7.7250 0 """
# the mean of the binary variable coincides with the
# percentage of missing values in the original variable
X_train['Age_NA'].mean()
# 0.19104803493449782
# yet the original variable still shows the missing values, which need to be replaced by any of the techniques we have learnt
X_train.isnull().mean()
"""
age 0.191048
fare 0.000000
Age_NA 0.000000
dtype: float64 """
# for example median imputation
median = X_train['age'].median()
X_train['age'] = X_train['age'].fillna(median)
X_test['age'] = X_test['age'].fillna(median)
# check that there are no more missing values
X_train.isnull().mean()
"""
age 0.0
fare 0.0
Age_NA 0.0
dtype: float64 """
""" Missing indicator on House Prices dataset """
# we are going to use the following variables, some are categorical some are numerical
cols_to_use = [
'LotFrontage', 'MasVnrArea', # numerical
'BsmtQual', 'FireplaceQu', # categorical
'SalePrice' # target
]
data = pd.read_csv('dataset/house-prices-advanced-regression-techniques/train.csv', usecols=cols_to_use)
data.head()
"""
LotFrontage MasVnrArea BsmtQual FireplaceQu SalePrice
0 65.0 196.0 Gd NaN 208500
1 80.0 0.0 Gd TA 181500
2 68.0 162.0 Gd TA 223500
3 60.0 0.0 TA Gd 140000
4 84.0 350.0 Gd TA 250000 """
# let's inspect the variables with missing values
data.isnull().mean()
"""
LotFrontage 0.177397
MasVnrArea 0.005479
BsmtQual 0.025342
FireplaceQu 0.472603
SalePrice 0.000000
dtype: float64 """
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data,
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# ((1022, 5), (438, 5))
# let's make a function to add a missing indicator binary variable
def missing_indicator(df, variable):
return np.where(df[variable].isnull(), 1, 0)
# let's loop over all the variables and add a binary
# missing indicator with the function we created
for variable in cols_to_use:
X_train[variable+'_NA'] = missing_indicator(X_train, variable)
X_test[variable+'_NA'] = missing_indicator(X_test, variable)
X_train.head()
"""
LotFrontage MasVnrArea BsmtQual FireplaceQu SalePrice LotFrontage_NA \
64 NaN 573.0 Gd NaN 219500 1
682 NaN 0.0 Gd Gd 173000 1
960 50.0 0.0 TA NaN 116500 0
1384 60.0 0.0 TA NaN 105000 0
1100 60.0 0.0 TA NaN 60000 0
MasVnrArea_NA BsmtQual_NA FireplaceQu_NA SalePrice_NA
64 0 0 1 0
682 0 0 0 0
960 0 0 1 0
1384 0 0 1 0
1100 0 0 1 0 """
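# Side note (a sketch, not the flow used in this example): scikit-learn >= 0.21
# can impute and add the indicator in a single step through
# SimpleImputer(add_indicator=True); indicator columns are appended after the
# imputed features. On this train set both columns have NA, so we get 4 columns:
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='median', add_indicator=True)
imputed = imputer.fit_transform(X_train[['LotFrontage', 'MasVnrArea']])
imputed.shape  # (1022, 4): 2 imputed columns + 2 missing indicators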
# now let's evaluate the mean value of the missing indicators
# first I capture the missing indicator variables with a
# list comprehension
missing_ind = [col for col in X_train.columns if 'NA' in col]
# calculate the mean
X_train[missing_ind].mean()
"""
LotFrontage_NA 0.184932
MasVnrArea_NA 0.004892
BsmtQual_NA 0.023483
FireplaceQu_NA 0.467710
SalePrice_NA 0.000000
dtype: float64 """
# the mean of the missing indicator coincides with the percentage of missing values in the original variable
X_train.isnull().mean()
"""
LotFrontage 0.184932
MasVnrArea 0.004892
BsmtQual 0.023483
FireplaceQu 0.467710
SalePrice 0.000000
LotFrontage_NA 0.000000
MasVnrArea_NA 0.000000
BsmtQual_NA 0.000000
FireplaceQu_NA 0.000000
SalePrice_NA 0.000000
dtype: float64 """
# let's make a function to fill missing values with a value: we have used a similar function in previous examples, so you are probably familiar with it
def impute_na(df, variable, value):
return df[variable].fillna(value)
# let's impute the NA with the median for numerical variables remember that we calculate the median using the train set
median = X_train['LotFrontage'].median()
X_train['LotFrontage'] = impute_na(X_train, 'LotFrontage', median)
X_test['LotFrontage'] = impute_na(X_test, 'LotFrontage', median)
median = X_train['MasVnrArea'].median()
X_train['MasVnrArea'] = impute_na(X_train, 'MasVnrArea', median)
X_test['MasVnrArea'] = impute_na(X_test, 'MasVnrArea', median)
# let's impute the NA in categorical variables by the most frequent category (aka the mode) the mode needs to be learnt from the train set
mode = X_train['BsmtQual'].mode()[0]
X_train['BsmtQual'] = impute_na(X_train, 'BsmtQual', mode)
X_test['BsmtQual'] = impute_na(X_test, 'BsmtQual', mode)
mode = X_train['FireplaceQu'].mode()[0]
X_train['FireplaceQu'] = impute_na(X_train, 'FireplaceQu', mode)
X_test['FireplaceQu'] = impute_na(X_test, 'FireplaceQu', mode)
# and now let's check there are no more NA
X_train.isnull().mean()
""" LotFrontage 0.0
MasVnrArea 0.0
BsmtQual 0.0
FireplaceQu 0.0
SalePrice 0.0
LotFrontage_NA 0.0
MasVnrArea_NA 0.0
BsmtQual_NA 0.0
FireplaceQu_NA 0.0
SalePrice_NA 0.0
dtype: float64 """
"""
OBSERVATION:
As you can see, we now have double the number of features compared to the original dataset. The original dataset had 4 variables; the pre-processed dataset contains 8, plus the target.
That's it for now, happy learning."""
|
# -*- coding: utf-8 -*-
"""
@author: Yi Zhang.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
import sys
if './' not in sys.path: sys.path.append('./')
from root.config.main import *
from objects.CSCG._3d.forms.standard._2s.discretize.main import _3dCSCG_Discretize
from objects.CSCG._3d.forms.standard.base.main import _3dCSCG_Standard_Form
from objects.CSCG._3d.forms.standard._2s.special.main import _2Form_Special
from objects.CSCG._3d.forms.standard._2s.project.main import _2Form_Projection
from objects.CSCG._3d.forms.standard._2s.reconstruct import _3dCSCG_SF2_reconstruct
from objects.CSCG._3d.forms.standard._2s.inheriting.private import _3dCSCG_S2F_Private
from objects.CSCG._3d.forms.standard._2s.visualize.main import _3dCSCG_S2F_VISUALIZE
class _3dCSCG_2Form(_3dCSCG_S2F_Private, _3dCSCG_Standard_Form):
"""
Standard 2-form.
:param mesh:
:param space:
:param is_hybrid:
:param orientation:
:param numbering_parameters:
:param name:
"""
def __init__(self, mesh, space, is_hybrid=True,
orientation='outer', numbering_parameters='Naive', name=None):
if name is None:
if is_hybrid:
name = 'hybrid-' + orientation + '-oriented-2-form'
else:
name = orientation + '-oriented-2-form'
super().__init__(mesh, space, is_hybrid, orientation, numbering_parameters, name)
self._k_ = 2
self.standard_properties.___PRIVATE_add_tag___('3dCSCG_standard_2form')
self._special_ = _2Form_Special(self)
self._projection_ = _2Form_Projection(self)
self.___PRIVATE_reset_cache___()
self._discretize_ = None
self._reconstruct_ = None
self._visualize_ = None
self._freeze_self_()
def ___PRIVATE_reset_cache___(self):
super().___PRIVATE_reset_cache___()
def ___PRIVATE_TW_FUNC_body_checker___(self, func_body):
assert func_body.mesh.domain == self.mesh.domain
assert func_body.ndim == self.ndim == 3
if func_body.__class__.__name__ == '_3dCSCG_VectorField':
assert func_body.ftype in ('standard',), \
f"3dCSCG 2form FUNC do not accept func _3dCSCG_VectorField of ftype {func_body.ftype}."
else:
raise Exception(f"3dCSCG 2form FUNC do not accept func {func_body.__class__}")
def ___PRIVATE_TW_BC_body_checker___(self, func_body):
assert func_body.mesh.domain == self.mesh.domain
assert func_body.ndim == self.ndim == 3
if func_body.__class__.__name__ == '_3dCSCG_VectorField':
assert func_body.ftype in ('standard','boundary-wise'), \
f"3dCSCG 2form BC do not accept func _3dCSCG_VectorField of ftype {func_body.ftype}."
else:
raise Exception(f"3dCSCG 2form BC do not accept func {func_body.__class__}")
@property
def special(self):
return self._special_
@property
def projection(self):
"""A wrapper of all projection methods."""
return self._projection_
@property
def discretize(self):
if self._discretize_ is None:
self._discretize_ = _3dCSCG_Discretize(self)
return self._discretize_
@property
def reconstruct(self):
if self._reconstruct_ is None:
self._reconstruct_ = _3dCSCG_SF2_reconstruct(self)
return self._reconstruct_
@property
def visualize(self):
if self._visualize_ is None:
self._visualize_ = _3dCSCG_S2F_VISUALIZE(self)
return self._visualize_
if __name__ == '__main__':
# mpiexec -n 5 python objects/CSCG/_3d/forms/standard/_2s/main.py
from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller
mesh = MeshGenerator('crazy', c=0.0)([3,3,3])
space = SpaceInvoker('polynomials')([('Lobatto',3), ('Lobatto',3), ('Lobatto',3)])
FC = FormCaller(mesh, space)
def u(t,x,y,z): return np.sin(np.pi*x)*np.cos(2*np.pi*y)*np.cos(np.pi*z) + t
def v(t,x,y,z): return np.cos(np.pi*x)*np.sin(np.pi*y)*np.cos(2*np.pi*z) + t
def w(t,x,y,z): return np.cos(np.pi*x)*np.cos(np.pi*y)*np.sin(2*np.pi*z) + t
velocity = FC('vector', (u,v,w))
U = FC('scalar', u)
V = FC('scalar', v)
W = FC('scalar', w)
f2 = FC('2-f', is_hybrid=False)
f2.TW.func.do.set_func_body_as(velocity)
f2.TW.current_time = 0
f2.TW.___DO_push_all_to_instant___()
f2.discretize()
f2.visualize(x=0.25)
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IgmpMldRange(Base):
"""
The IgmpMldRange class encapsulates a list of igmpMldRange resources that are managed by the user.
A list of resources can be retrieved from the server using the IgmpMldRange.find() method.
The list can be managed by using the IgmpMldRange.add() and IgmpMldRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'igmpMldRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'GeneralQueryResponseMode': 'generalQueryResponseMode',
'ImmediateResponse': 'immediateResponse',
'JoinLeaveMultiplier': 'joinLeaveMultiplier',
'MeshingMode': 'meshingMode',
'Name': 'name',
'ObjectId': 'objectId',
'ReportFrequency': 'reportFrequency',
'RouterAlert': 'routerAlert',
'SpecificQueryResponseMode': 'specificQueryResponseMode',
'UnsolicitedResponseMode': 'unsolicitedResponseMode',
'Version': 'version',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IgmpMldRange, self).__init__(parent, list_op)
@property
def JoinLeaveMulticastGroupRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.joinleavemulticastgrouprange_7c48c476a64513e69ee2e7e2a4bb1b29.JoinLeaveMulticastGroupRange): An instance of the JoinLeaveMulticastGroupRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.joinleavemulticastgrouprange_7c48c476a64513e69ee2e7e2a4bb1b29 import JoinLeaveMulticastGroupRange
if self._properties.get('JoinLeaveMulticastGroupRange', None) is not None:
return self._properties.get('JoinLeaveMulticastGroupRange')
else:
return JoinLeaveMulticastGroupRange(self)
@property
def MulticastGroupRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.multicastgrouprange_d20a8c4f3b44559cebc9bd55b37a3438.MulticastGroupRange): An instance of the MulticastGroupRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.multicastgrouprange_d20a8c4f3b44559cebc9bd55b37a3438 import MulticastGroupRange
if self._properties.get('MulticastGroupRange', None) is not None:
return self._properties.get('MulticastGroupRange')
else:
return MulticastGroupRange(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def GeneralQueryResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, responds to General Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'])
@GeneralQueryResponseMode.setter
def GeneralQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'], value)
@property
def ImmediateResponse(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
"""
return self._get_attribute(self._SDM_ATT_MAP['ImmediateResponse'])
@ImmediateResponse.setter
def ImmediateResponse(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ImmediateResponse'], value)
@property
def JoinLeaveMultiplier(self):
# type: () -> int
"""
Returns
-------
- number: The number of times a host sends every Join or Leave message.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'])
@JoinLeaveMultiplier.setter
def JoinLeaveMultiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'], value)
@property
def MeshingMode(self):
# type: () -> str
"""
Returns
-------
- str: Defines how the hosts in a range join the selected multicast group ranges.
"""
return self._get_attribute(self._SDM_ATT_MAP['MeshingMode'])
@MeshingMode.setter
def MeshingMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['MeshingMode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def ReportFrequency(self):
# type: () -> int
"""
Returns
-------
- number: When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
"""
return self._get_attribute(self._SDM_ATT_MAP['ReportFrequency'])
@ReportFrequency.setter
def ReportFrequency(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['ReportFrequency'], value)
@property
def RouterAlert(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, sets the Send Router Alert bit in the IP header.
"""
return self._get_attribute(self._SDM_ATT_MAP['RouterAlert'])
@RouterAlert.setter
def RouterAlert(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['RouterAlert'], value)
@property
def SpecificQueryResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, responds to Group-Specific Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'])
@SpecificQueryResponseMode.setter
def SpecificQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'], value)
@property
def UnsolicitedResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'])
@UnsolicitedResponseMode.setter
def UnsolicitedResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'], value)
@property
def Version(self):
# type: () -> str
"""
Returns
-------
- str: IGMP/MLD protocol version.
"""
return self._get_attribute(self._SDM_ATT_MAP['Version'])
@Version.setter
def Version(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Version'], value)
def update(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Updates igmpMldRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Adds a new igmpMldRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Returns
-------
- self: This instance with all currently retrieved igmpMldRange resources using find and the newly added igmpMldRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained igmpMldRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ObjectId=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Finds and retrieves igmpMldRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve igmpMldRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all igmpMldRange resources from the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Returns
-------
- self: This instance with matching igmpMldRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of igmpMldRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the igmpMldRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Apply changes for on the fly configuration support.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
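# --- Hypothetical usage sketch (not part of the generated class) ---
# Assuming `proto_stack` is an instance of this generated protocolStack node obtained from a
# connected ixnetwork_restpy session (object and argument values below are illustrative only):
#   proto_stack.CustomProtocolStack(Arg2=['ipEndpoint'], Arg3='kAppend')   # add plugin types to the stack
#   status = proto_stack.DisableProtocolStack(Arg2='ipEndpoint')           # returns the exec status string
#   status = proto_stack.EnableProtocolStack(Arg2='ipEndpoint')
#   proto_stack.Apply()                                                    # push on-the-fly changes to the server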
def IgmpMldJoin(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldJoin operation on the server.
Join IGMP/MLD multicast group ranges on the fly
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldJoin(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldJoin(Arg2=enum, async_operation=bool)
--------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldJoin', payload=payload, response_object=None)
def IgmpMldLeave(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldLeave operation on the server.
Leave IGMP/MLD multicast group ranges on the fly
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldLeave(async_operation=bool)
----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldLeave(Arg2=enum, async_operation=bool)
---------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldLeave', payload=payload, response_object=None)
def IgmpMldStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldStart operation on the server.
Start IGMP/MLD on selected plugins and ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldStart(async_operation=bool)
----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldStart(Arg2=enum, async_operation=bool)
---------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldStart', payload=payload, response_object=None)
def IgmpMldStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldStop operation on the server.
Stop IGMP/MLD on selected plugins and ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldStop(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldStop(Arg2=enum, async_operation=bool)
--------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldStop', payload=payload, response_object=None)
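# --- Hypothetical usage sketch (not part of the generated class) ---
# The IGMP/MLD exec methods above accept either no positional argument or Arg2, mirroring the two
# signatures listed in each docstring. Illustrative calls (object name is an assumption):
#   proto_stack.IgmpMldStart()               # start IGMP/MLD on selected plugins and ranges
#   proto_stack.IgmpMldJoin(Arg2='sync')     # Arg2 takes the (async | sync) enum from the signature above
#   proto_stack.IgmpMldLeave()               # leave multicast group ranges on the fly
#   proto_stack.IgmpMldStop()                # stop IGMP/MLD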
|
import pyfits as pf
import numpy as np
import matplotlib.pyplot as plt
from MuSCADeT import MCA
from MuSCADeT import pca_ring_spectrum as pcas
import scipy.stats as sc
from MuSCADeT import colour_subtraction as cs
## Opening data cube
cube = pf.open('./Simu_real/Cube.fits')[0].data
num,n,n = np.shape(cube)
## Prior mixing matrix A for the toy model
Aprior = pf.open('Simu_real/Simu_A.fits')[0].data
## Input parameters
pca = 'PCA' # If set to 'PCA', the mixing coefficients are estimated via PCA; otherwise the array provided in Aprior is used
n = 5000 #Number of iterations
nsig = 5 #Threshold in units of noise standard deviation
ns = 2 #Number of sources
angle = 5 #Resolution angle for the PCA colour estimation (start with 15 then adjust empirically)
## Running MuSCADeT
S,A = MCA.mMCA(cube, Aprior.T, nsig,n, PCA=[ns,angle], mode=pca)
hdus = pf.PrimaryHDU(S)
lists = pf.HDUList([hdus])
lists.writeto('Simu_real/Sources_'+str(n)+'.fits', clobber=True)
hdus = pf.PrimaryHDU(A)
lists = pf.HDUList([hdus])
lists.writeto('Simu_real/Estimated_A.fits', clobber=True)
cs.make_colour_sub('Simu_real/Sources_'+str(n)+'.fits',
'Simu_real/Estimated_A.fits',
'./Simu_real/Cube.fits',
prefix = './Simu_real/',
cuts = ['-0.1','0.6','-0.05','0.3','-0.02','0.1'])
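## Minimal inspection sketch (assumption: run after the script above in the same session, so
## pf, np and n are still defined). It simply reloads the outputs written above and reports their shapes.
S_out = pf.open('Simu_real/Sources_'+str(n)+'.fits')[0].data
A_out = pf.open('Simu_real/Estimated_A.fits')[0].data
print('separated sources array shape:', np.shape(S_out))
print('estimated mixing matrix shape:', np.shape(A_out))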
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "invoice4django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
# 1. Read /Users/dkawata/work/obs/LAMOST/DR3/dr3_stellar.fits
# 2. output sels_rv.asc for gcdp-ana/lbsels.dat for mock data generation
# 3. Plot x-y distribution
#
# History:
# 29/03/2018 Written - Daisuke Kawata
#
import pyfits
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
import mwdust
from scipy import stats
from galpy.util import bovy_coords
# teff vs. Mv
# F0V-A5V
# F0V A9V
teffmv=np.array([7220.0, 7440.0, 7500.0, 7800.0, 8000.0, 8080.0])
mvmag= np.array([2.51, 2.30, 2.29, 2.07, 1.89, 1.84])
bvcol= np.array([0.294, 0.255, 0.250, 0.210, 0.170, 0.160])
# print ' mvmag and bvcol shape=',np.shape(mvmag),np.shape(bvcol)
print ' Mv =',mvmag
# Jester et al. (2005) http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php
# SDSS photometry is not used
# input data
infile='/Users/dkawata/work/obs/LAMOST/DR3/LAMOSTDR3_AstarxAPASSDR9.fits'
star_hdus=pyfits.open(infile)
star=star_hdus[1].data
star_hdus.close()
# read the data
# number of data points
print 'number of stars read =',len(star['obsid'])
# select stars with teff and logg
# Galactic coordinates
Tllbb=bovy_coords.radec_to_lb(star['ra'],star['dec'],degree=True,epoch=2000.0)
glon=Tllbb[:,0]
glat=Tllbb[:,1]
sindx=np.where((star['teff']>7330.0) & (star['teff']<8040.0) \
& (star['logg']>3.2) \
& (star['Vmag']>0.0) & (star['Bmag']>0.0) \
& (star['rv_err']>0.0) & (star['rv_err']<10.0))
# & (glon>140.0) & (glon<220.0))
# & (glon>175.0) & (glon<185.0))
nstars=len(star['ra'][sindx])
print ' N selected=',nstars
# extract the necessary particle info
ra_s=star['ra'][sindx]
dec_s=star['dec'][sindx]
teff_s=star['teff'][sindx]
logg_s=star['logg'][sindx]
# from APASS DR9
vmag_s=star['Vmag'][sindx]
bmag_s=star['Bmag'][sindx]
feh_s=star['feh'][sindx]
rv_s=star['rv'][sindx]
rverr_s=star['rv_err'][sindx]
glon_s=glon[sindx]
glat_s=glat[sindx]
# absolute V magnitude (interpolated from the Teff-Mv relation above)
mvmag_s=np.interp(teff_s,teffmv,mvmag)
# extinction
# using mwdust
# need to do
# > export DUST_DIR=/Users/dkawata/work/pops/mwdust/DUST_DATA
combined=mwdust.Combined15(filter='CTIO V')
avmag=np.zeros_like(glon_s)
mod0_s=vmag_s-mvmag_s+avmag
dist0_s=np.power(10.0,(mod0_s+5.0)/5.0)*0.001
dist_s=np.power(10.0,(mod0_s+5.0)/5.0)*0.001
# NOTE: range(0) skips the iterative extinction correction below; use e.g. range(3) to enable it
for i in range(0):
# distance modulus
mod_s=vmag_s-mvmag_s-avmag
dist_s=np.power(10.0,(mod_s+5.0)/5.0)*0.001
# calculate extinction
for j in range(len(glon_s)):
avmag[j]=combined(glon_s[j],glat_s[j],dist_s[j])
print ' mwdust iteration ',i,' finished'
# photometry V and V-I
# dwarf http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt
# follows V-I=B-V well up to B-V<1.5; hence, for this purpose we set V-I = B-V
vicol_s=bmag_s-vmag_s
# labels
# plt.xlabel(r"Teff",fontsize=18,fontname="serif")
# plt.ylabel(r"Mv (mag)",fontsize=18,fontname="serif",style="normal")
# scatter plot
# plt.scatter(teff_s,mvmag_s,c=dist_s,s=30,vmin=0.0,vmax=10.0)
# plt.show()
# Sun's Galactocentric radius (kpc) used in Bland-Hawthorn & Gerhard (2016)
xsun=-8.1
# Sun's peculiar velocity (U, V, W) from Schoenrich et al. (2010)
usun=11.1
vsun=12.24
wsun=7.25
# circular velocity
# Jo Bovy's suggestion
vcirc=30.24*np.abs(xsun)-vsun
# degree to radian
glonrad_s=glon_s*np.pi/180.0
glatrad_s=glat_s*np.pi/180.0
# x,y position
xpos_s=xsun+np.cos(glonrad_s)*dist_s*np.cos(glatrad_s)
ypos_s=np.sin(glonrad_s)*dist_s*np.cos(glatrad_s)
zpos_s=np.sin(glatrad_s)*dist_s
# rgal with Reid et al. value
rgal_s=np.sqrt(xpos_s**2+ypos_s**2)
# linear regression of metallicity gradient
slope, intercept, r_value, p_value, std_err = stats.linregress(rgal_s,feh_s)
print ' slope, intercept=',slope,intercept
# delta feh
delfeh_s=feh_s-(slope*rgal_s+intercept)
# output ascii data for test
f=open('star_pos.asc','w')
for i in range(nstars):
print >>f, "%f %f %f %f %f %f %f %f %f %f %f %f %f %f" \
%(xpos_s[i],ypos_s[i],zpos_s[i],rgal_s[i] \
,feh_s[i],delfeh_s[i],glon_s[i],glat_s[i],dist_s[i],dist0_s[i] \
,avmag[i],bmag_s[i],vmag_s[i],vicol_s[i])
f.close()
# selecting the stars with z and Glon
# 3.75 kpc 15% plx accuracy with 0.04 mas plx error.
distlim=3.75
sindxz=np.where((np.fabs(zpos_s)<0.5) & (dist_s<distlim))
nsels=len(rgal_s[sindxz])
print 'N stars(|z|<0.5 & d<',distlim,' kpc)=',nsels
# use position from the Sun
xsels=xpos_s[sindxz]-xsun
ysels=ypos_s[sindxz]
zsels=zpos_s[sindxz]
rvsels=rv_s[sindxz]
rverrsels=rverr_s[sindxz]
vmagsels=vmag_s[sindxz]
vicolsels=vicol_s[sindxz]
rasels=ra_s[sindxz]
decsels=dec_s[sindxz]
glonsels=glon_s[sindxz]
glatsels=glat_s[sindxz]
distsels=dist_s[sindxz]
teffsels=teff_s[sindxz]
loggsels=logg_s[sindxz]
avmagsels=avmag[sindxz]
# for input of lbsels
f=open('sels_rv.asc','w')
print >>f,"# nstar= %10d" % (nsels)
for i in range(nsels):
print >>f,"%12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e" \
%(xsels[i],ysels[i],zsels[i],rvsels[i],rverrsels[i] \
,vmagsels[i],vicolsels[i],rasels[i],decsels[i],glonsels[i] \
,glatsels[i],distsels[i],teffsels[i],loggsels[i],avmagsels[i])
f.close()
### plot radial metallicity distribution
# plot the selected stars
plt.scatter(rgal_s[sindxz],feh_s[sindxz],c=delfeh_s[sindxz],s=5,vmin=-0.1,vmax=0.25,cmap=cm.jet)
# radial gradient
nsp=10
xsp=np.linspace(4.0,20.0,nsp)
ysp=slope*xsp+intercept
plt.plot(xsp,ysp,'b-')
plt.xlabel(r"R (kpc)",fontsize=18,fontname="serif")
plt.ylabel(r"[Fe/H]",fontsize=18,fontname="serif")
plt.axis([4.0,20.0,-1.0,0.75],'scaled')
cbar=plt.colorbar()
cbar.set_label(r'$\delta$[Fe/H]')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.show()
# plot circles of constant Galactocentric radius
an=np.linspace(0,2.0*np.pi,100)
rad=7.0
i=0
rad=4.0
while i<15:
rad=rad+0.5
plt.plot(rad*np.cos(an),rad*np.sin(an),'k:')
i+=1
# plot arm position from Reid et al. 2014
# number of points
nsp=100
isp=0
numsp=3
while isp<numsp:
# angle in R14 is clock-wise start at the Sun at (0.0, Rsun)
# convert to the one anti-clockwise starting from +x, y=0
if isp==0:
# Scutum Arm
angen=(180.0-3.0)*np.pi/180.0
# angen=(180.0+45.0)*np.pi/180.0
angst=(180.0-101.0)*np.pi/180.0
angref=(180.0-27.6)*np.pi/180.0
rref=5.0
# pitchangle
tanpa=np.tan(19.8*np.pi/180.0)
elif isp==1:
# Sagittarius Arm
angen=(180.0+2.0)*np.pi/180.0
# angen=(180.0+45.0)*np.pi/180.0
angst=(180.0-68.0)*np.pi/180.0
angref=(180.0-25.6)*np.pi/180.0
rref=6.6
# pitchangle
tanpa=np.tan(6.9*np.pi/180.0)
else:
# Perseus Arm
angen=(180.0-88.0)*np.pi/180.0
angst=(180.0+21.0)*np.pi/180.0
angref=(180.0-14.2)*np.pi/180.0
rref=9.9
# pitchangle
tanpa=np.tan(9.4*np.pi/180.0)
# logarithmic spiral arm: ln(r/rref) = tan(pa)*(theta-angref), for an anti-clockwise arm
an=np.linspace(angst,angen,nsp)
xsp=np.zeros(nsp)
ysp=np.zeros(nsp)
i=0
while i<nsp:
rsp=np.exp(tanpa*(an[i]-angref))*rref
xsp[i]=rsp*np.cos(an[i])
ysp[i]=rsp*np.sin(an[i])
i+=1
if isp==0:
plt.plot(xsp,ysp,'b-')
elif isp==1:
plt.plot(xsp,ysp,'r-')
else:
plt.plot(xsp,ysp,'g-')
isp+=1
# plot the Sun (star symbol) and the selected stars
plt.scatter(xsun,0.0,marker="*",s=100,color='k')
plt.scatter(xpos_s[sindxz],ypos_s[sindxz],c=delfeh_s[sindxz],s=10,vmin=-0.5,vmax=0.5,cmap=cm.jet)
plt.xlabel(r"X (kpc)",fontsize=18,fontname="serif")
plt.ylabel(r"Y (kpc)",fontsize=18,fontname="serif")
plt.axis([-13.0,-3.0,-4.5,4.5],'scaled')
cbar=plt.colorbar()
cbar.set_label(r'$\delta$[Fe/H]')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.show()
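# Minimal worked example (illustrative check, values taken from the Perseus-arm parameters above)
# of the logarithmic spiral used for the arms: r(theta) = rref * exp(tan(pa) * (theta - angref)).
pa_chk = 9.4*np.pi/180.0
angref_chk = (180.0-14.2)*np.pi/180.0
rref_chk = 9.9
theta_chk = np.linspace((180.0-88.0)*np.pi/180.0, (180.0+21.0)*np.pi/180.0, 5)
r_chk = rref_chk*np.exp(np.tan(pa_chk)*(theta_chk-angref_chk))
print ' Perseus arm check: theta (rad) =', theta_chk
print ' Perseus arm check: r (kpc) =', r_chk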
|
import time
from datetime import timedelta
from http.cookies import SimpleCookie
from unittest.mock import Mock, AsyncMock
import pytest
from fastapi import HTTPException
from fastapi.security import SecurityScopes
from starlette.responses import Response
from fastapi_login import LoginManager
from fastapi_login.exceptions import InvalidCredentialsException
@pytest.mark.asyncio
async def test_token_expiry(clean_manager, default_data):
token = clean_manager.create_access_token(
data=default_data,
expires=timedelta(microseconds=1) # should be invalid instantly
)
time.sleep(1)
with pytest.raises(HTTPException) as exc_info:
await clean_manager.get_current_user(token)
assert exc_info
@pytest.mark.asyncio
@pytest.mark.parametrize("loader", [Mock(), AsyncMock()])
async def test_user_loader(loader, clean_manager, default_data, db):
token = clean_manager.create_access_token(data=default_data)
# use the parametrized loader (Mock or AsyncMock) rather than shadowing it with a new Mock
clean_manager.user_loader(loader)
_ = await clean_manager.get_current_user(token)
loader.assert_called()
loader.assert_called_with(default_data['sub'])
@pytest.mark.asyncio
async def test_user_loader_not_set(clean_manager, default_data):
token = clean_manager.create_access_token(data=default_data)
with pytest.raises(Exception) as exc_info:
await clean_manager.get_current_user(token)
assert "Missing user_loader callback" == str(exc_info.value)
@pytest.mark.asyncio
async def test_user_loader_returns_none(clean_manager, invalid_data, load_user_fn):
clean_manager.user_loader(load_user_fn)
token = clean_manager.create_access_token(data={"sub": invalid_data["username"]})
with pytest.raises(HTTPException) as exc_info:
await clean_manager.get_current_user(token)
assert exc_info.value == InvalidCredentialsException
def test_token_from_cookie(clean_manager):
request = Mock(cookies={clean_manager.cookie_name: "test-value"})
token = clean_manager._token_from_cookie(request)
assert token == "test-value"
def test_token_from_cookie_raises(clean_manager):
request = Mock(cookies={clean_manager.cookie_name: ""})
with pytest.raises(HTTPException) as exc_info:
clean_manager._token_from_cookie(request)
assert exc_info.value == InvalidCredentialsException
def test_token_from_cookie_returns_none_auto_error_off(clean_manager):
clean_manager.auto_error = False
request = Mock(cookies={clean_manager.cookie_name: ""})
token = clean_manager._token_from_cookie(request)
assert token is None
def test_set_cookie(clean_manager, default_data):
token = clean_manager.create_access_token(data=default_data)
response = Response()
clean_manager.set_cookie(response, token)
cookie = SimpleCookie(response.headers['set-cookie'])
cookie_value = cookie.get(clean_manager.cookie_name)
assert cookie_value is not None
assert cookie_value["httponly"] is True
assert cookie_value["samesite"] == "lax"
assert cookie_value.value == token
assert cookie_value.key == clean_manager.cookie_name
def test_config_no_cookie_no_header_raises(secret, token_url):
with pytest.raises(Exception) as exc_info:
LoginManager(secret, token_url, use_cookie=False, use_header=False)
assert "use_cookie and use_header are both False one of them needs to be True" == str(exc_info.value)
def test_has_scopes_true(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data, scopes=scopes)
required_scopes = SecurityScopes(scopes=scopes)
assert clean_manager.has_scopes(token, required_scopes)
def test_has_scopes_no_scopes(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data)
assert clean_manager.has_scopes(token, SecurityScopes(scopes=scopes)) is False
def test_has_scopes_missing_scopes(clean_manager, default_data):
scopes = ["read"]
token = clean_manager.create_access_token(data=default_data)
required_scopes = scopes + ["write"]
assert clean_manager.has_scopes(token, SecurityScopes(scopes=required_scopes)) is False
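# --- Minimal usage sketch of the API exercised by the tests above ---
# Assumption: the secret, token URL and user data below are illustrative values; in the tests they
# come from conftest fixtures.
manager = LoginManager("a-very-secret-value", "/auth/token", use_cookie=True)
manager.user_loader(lambda username: {"username": username})  # the loader may also be an async callable
token = manager.create_access_token(data={"sub": "john@example.com"}, scopes=["read"])
assert manager.has_scopes(token, SecurityScopes(scopes=["read"]))
response = Response()
manager.set_cookie(response, token)  # sets an httponly cookie named manager.cookie_name
# await manager.get_current_user(token) would now resolve the user via the registered loader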
|
# Ensure that the local version of the runway module is used, not a pip
# installed version
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '.')
import os
import pytest
import numpy as np
from PIL import Image
from runway.data_types import *
from runway.exceptions import *
# UTIL FUNCTIONS ---------------------------------------------------------------
def check_data_type_interface(data_type):
assert callable(data_type.serialize)
assert callable(data_type.deserialize)
assert callable(data_type.to_dict)
# We arbitrarily use this release tag to test file download and serialization
def check_expected_contents_for_0057_file_download(path):
assert os.path.isfile(path)
with open(path, 'r') as f:
assert f.read() == '# Runway Python SDK\n'
# We arbitrarily use this release tag to test file download and serialization
def check_expected_contents_for_0057_tar_download(path):
readme_path = os.path.join(path, 'model-sdk-0.0.57', 'README.md')
assert os.path.isfile(readme_path)
with open(readme_path, 'r') as f:
assert f.read() == '# Runway Python SDK\n'
# BASIC TESTS FOR ALL DATA TYPES -----------------------------------------------
def test_data_type_interface_base_type():
check_data_type_interface(BaseType)
def test_data_type_interface_any():
check_data_type_interface(any)
def test_data_type_interface_array():
check_data_type_interface(array)
def test_data_type_interface_image():
check_data_type_interface(image)
def test_data_type_interface_vector():
check_data_type_interface(vector)
def test_data_type_interface_category():
check_data_type_interface(category)
def test_data_type_interface_number():
check_data_type_interface(number)
def test_data_type_interface_text():
check_data_type_interface(text)
def test_data_type_interface_file():
check_data_type_interface(file)
# BASE TYPE --------------------------------------------------------------------
def test_base_type_to_dict():
base_type = BaseType('base', description='Some description.')
obj = base_type.to_dict()
assert obj['type'] == 'base'
assert obj['description'] == 'Some description.'
# The BaseType is an abstract class that requires its serialize/deserialize
# methods to be overwritten by subclasses
def test_base_type_serialize_not_implemented():
base_type = BaseType('base')
with pytest.raises(NotImplementedError):
base_type.serialize('test')
def test_base_type_deserialize_not_implemented():
base_type = BaseType('base')
with pytest.raises(NotImplementedError):
base_type.deserialize('test')
# ANY --------------------------------------------------------------------------
def test_any_to_dict():
a = any()
obj = a.to_dict()
assert obj['type'] == 'any'
assert obj['description'] == None
def test_any_serialization():
a = any()
assert a.serialize(512) == 512
assert a.serialize(512.5) == 512.5
assert a.serialize('512') == '512'
assert a.serialize(None) == None
assert a.serialize(True) == True
assert a.serialize([]) == []
assert a.serialize({}) == {}
def test_any_deserialize():
a = any()
assert a.deserialize(512) == 512
assert a.deserialize(512.5) == 512.5
assert a.deserialize('512') == '512'
assert a.deserialize(None) == None
assert a.deserialize(True) == True
assert a.deserialize([]) == []
assert a.deserialize({}) == {}
# TEXT -------------------------------------------------------------------------
def test_text_to_dict():
default = 'Some default text'
description = 'A description about this variable.'
txt = text(default=default, description=description, min_length=1, max_length=20)
obj = txt.to_dict()
assert obj['type'] == 'text'
assert obj['default'] == default
assert obj['minLength'] == 1
assert obj['maxLength'] == 20
assert obj['description'] == description
def test_text_serialization():
txt = text()
assert txt.serialize(512) == '512'
def test_text_deserialize():
txt = text()
assert txt.deserialize('512') == '512'
# NUMBER -----------------------------------------------------------------------
def test_number_to_dict():
default = 42
description = 'A description about this variable.'
num = number(default=default, description=description, min=10, max=100)
obj = num.to_dict()
assert obj['type'] == 'number'
assert obj['default'] == default
assert obj['min'] == 10
assert obj['max'] == 100
assert obj['step'] == 1
assert obj['description'] == description
def test_number_serialization():
assert 1 == number().serialize(1)
assert 1.1 == number().serialize(1.1)
def test_number_deserialize():
assert 1 == number().deserialize(1)
assert 1.1 == number().deserialize(1.1)
def test_number_deserialize_numpy_scalar():
# np.float was removed in NumPy 1.24; np.float64 keeps the numpy-scalar intent of this test
assert 10 == number().deserialize(np.float64(10))
def test_number_serialize_numpy_scalar():
assert 10 == number().serialize(np.float64(10))
# ARRAY ------------------------------------------------------------------------
def test_array_to_dict():
description = 'A description about this variable.'
arr = array(item_type=text, description=description, min_length=5, max_length=10)
obj = arr.to_dict()
assert obj['type'] == 'array'
assert obj['minLength'] == 5
assert obj['maxLength'] == 10
assert obj['description'] == description
tmp = text()
tmp.name = 'text_array_item'
assert obj['itemType'] == tmp.to_dict()
def test_array_no_item_type():
with pytest.raises(MissingArgumentError):
arr = array()
def test_array_serialization():
expect = ['10', '100', '1000']
assert expect == array(item_type=text).serialize([10, 100, 1000])
expect = ['one', 'two', 'three']
assert expect == array(item_type=text).serialize(['one', 'two', 'three'])
expect = [10, 100, 1000]
arr = array(item_type=vector(length=3))
assert expect == arr.serialize(np.array(expect))
def test_array_deserialization():
expect = ['one', 'two', 'three']
assert expect == array(item_type=text).deserialize(['one', 'two', 'three'])
expect = np.array([10, 100, 1000])
arr = array(item_type=vector(length=3))
assert np.array_equal(expect, arr.deserialize(expect.tolist()))
# VECTOR -----------------------------------------------------------------------
def test_vector_to_dict():
description = 'A description about this variable.'
vec = vector(length=128, description=description, sampling_mean=0, sampling_std=1)
obj = vec.to_dict()
assert obj['type'] == 'vector'
assert obj['length'] == 128
assert obj['samplingMean'] == 0
assert obj['samplingStd'] == 1
assert obj['description'] == description
def test_vector_no_item_type():
with pytest.raises(MissingArgumentError):
vec = vector()
def test_vector_serialization():
zeros = np.zeros(128)
serialized = vector(length=128).serialize(zeros)
assert np.array_equal(np.array(zeros), serialized)
assert type(serialized) == list
def test_vector_deserialization():
zeros = np.zeros(128)
deserialized = vector(length=128).deserialize(zeros)
assert np.array_equal(zeros.tolist(), deserialized)
assert isinstance(deserialized, np.ndarray)
def test_vector_default():
vector_type = vector(length=5, sampling_mean=42)
assert np.array_equal(vector_type.default, [42, 42, 42, 42, 42])
def test_vector_invalid_default():
with pytest.raises(InvalidArgumentError):
vector_type = vector(length=5, default=[42, 42, 42, 42])
def test_vector_default_no_length_arg():
vector_type = vector(default=[42, 42])
assert vector_type.length == 2
# CATEGORY ---------------------------------------------------------------------
def test_category_to_dict():
description = 'A description about this variable.'
cat = category(choices=['one', 'two', 'three'], default='two', description=description)
obj = cat.to_dict()
assert obj['type'] == 'category'
assert obj['oneOf'] == ['one', 'two', 'three']
assert obj['default'] == 'two'
assert obj['description'] == description
def test_category_serialization():
cat = category(choices=['one', 'two', 'three'], default='two')
assert 'one' == cat.serialize('one')
def test_category_deserialization():
cat = category(choices=['one', 'two', 'three'], default='two')
assert 'one' == cat.deserialize('one')
def test_category_choices_none():
with pytest.raises(MissingArgumentError):
cat = category()
def test_category_choices_empty_arr():
with pytest.raises(MissingArgumentError):
cat = category(choices=[])
def test_category_default_not_in_choices():
with pytest.raises(InvalidArgumentError):
cat = category(choices=['one', 'two'], default='three')
def test_category_default_choice():
cat = category(choices=['one', 'two', 'three'], default='two')
assert cat.default == 'two'
def test_category_default_choice_is_first_if_not_specified():
cat = category(choices=['one', 'two', 'three'])
assert cat.default == 'one'
def test_category_deserialized_value_is_not_in_choices():
cat = category(choices=['one', 'two', 'three'])
with pytest.raises(InvalidArgumentError):
cat.deserialize('four')
# FILE -------------------------------------------------------------------------
def test_file_to_dict():
f = file()
obj = f.to_dict()
assert obj['type'] == 'file'
assert obj['description'] == None
def test_file_to_dict_directory():
description = 'A description about this variable.'
f = file(is_directory=True, description=description)
obj = f.to_dict()
assert obj['type'] == 'file'
assert obj['isDirectory'] == True
assert obj['description'] == description
def test_file_serialization_base():
f = file()
assert 'file.txt' == f.serialize('file.txt')
def test_file_serialization_relative():
f = file()
assert 'directory/file.txt' == f.serialize('directory/file.txt')
def test_file_serialization_absolute():
f = file()
assert '/home/user/file.txt' == f.serialize('/home/user/file.txt')
def test_file_serialization_remote():
f = file()
url = 'https://github.com/runwayml/model-sdk/archive/0.0.57.tar.gz'
assert url == f.serialize(url)
def test_file_serialization_base_directory():
f = file(is_directory=True)
assert 'directory' == f.serialize('directory')
def test_file_serialization_relative_directory():
f = file(is_directory=True)
assert 'directory/directory' == f.serialize('directory/directory')
def test_file_serialization_absolute_directory():
f = file(is_directory=True)
assert '/home/user/directory' == f.serialize('/home/user/directory')
def test_file_serialization_remote_directory():
f = file(is_directory=True)
url = 'https://github.com/runwayml/model-sdk/archive/0.0.57.tar.gz'
assert url == f.serialize(url)
def test_file_deserialization_base():
f = file()
assert 'README.md' == f.deserialize('README.md')
def test_file_deserialization_relative():
f = file()
assert 'runway/__init__.py' == f.deserialize('runway/__init__.py')
def test_file_deserialization_absolute():
absolute_path = os.path.abspath('README.md')
f = file()
assert absolute_path == f.deserialize(absolute_path)
def test_file_deserialization_not_exist():
with pytest.raises(InvalidArgumentError):
file().deserialize('file-that-does-not-exist.txt')
def test_file_deserialization_invalid_extension():
with pytest.raises(InvalidArgumentError):
file(extension='.txt').deserialize('README.md')
def test_file_deserialization_remote():
f = file()
url = 'https://raw.githubusercontent.com/runwayml/model-sdk/0.0.57/README.md'
path = f.deserialize(url)
assert os.path.exists(path)
check_expected_contents_for_0057_file_download(path)
def test_file_deserialization_base_directory():
f = file(is_directory=True)
assert 'runway' == f.deserialize('runway')
def test_file_deserialization_relative_directory():
f = file(is_directory=True)
assert 'docs/source' == f.deserialize('docs/source')
def test_file_deserialization_absolute_directory():
f = file(is_directory=True)
assert '/usr/bin' == f.deserialize('/usr/bin')
def test_file_deserialization_remote_directory():
f = file(is_directory=True)
url = 'https://github.com/runwayml/model-sdk/archive/0.0.57.tar.gz'
path = f.deserialize(url)
assert os.path.exists(path)
check_expected_contents_for_0057_tar_download(path)
# IMAGE ------------------------------------------------------------------------
def test_image_to_dict():
img = image(channels=3, min_width=128, min_height=128, max_width=512, max_height=512)
obj = img.to_dict()
assert obj['type'] == 'image'
assert obj['channels'] == 3
assert obj['minWidth'] == 128
assert obj['maxWidth'] == 512
assert obj['minHeight'] == 128
assert obj['maxHeight'] == 512
assert obj['description'] == None
def test_image_serialize_and_deserialize():
directory = os.path.dirname(os.path.realpath(__file__))
img = Image.open(os.path.join(directory, 'test_image.jpg'))
serialized_pil = image().serialize(img)
deserialized_pil = image().deserialize(serialized_pil)
assert issubclass(type(deserialized_pil), Image.Image)
serialize_np_img = image().serialize(np.asarray(img))
deserialize_np_img = image().deserialize(serialize_np_img)
assert issubclass(type(deserialize_np_img), Image.Image)
def test_image_serialize_invalid_type():
with pytest.raises(InvalidArgumentError):
image().serialize(True)
with pytest.raises(InvalidArgumentError):
image().serialize([])
with pytest.raises(InvalidArgumentError):
image().serialize('data:image/jpeg;base64,')
# SEGMENTATION ------------------------------------------------------------------------
def test_segmentation_to_dict():
seg = segmentation(label_to_id={"background": 0, "person": 1}, label_to_color={'background': [0, 0, 0]}, width=512, height=512)
obj = seg.to_dict()
assert obj['type'] == 'segmentation'
assert obj['labelToId'] == {"background": 0, "person": 1}
assert obj['labelToColor'] == {"background": [0, 0, 0], "person": [140, 59, 255]}
assert obj['description'] == None
def test_segmentation_serialize_and_deserialize():
directory = os.path.dirname(os.path.realpath(__file__))
img = Image.open(os.path.join(directory, 'test_segmentation.png'))
serialized_pil = segmentation(label_to_id={"background": 0, "person": 1}).serialize(img)
deserialized_pil = segmentation(label_to_id={"background": 0, "person": 1}).deserialize(serialized_pil)
assert issubclass(type(deserialized_pil), Image.Image)
def test_segmentation_no_label_to_id():
with pytest.raises(MissingArgumentError):
segmentation()
def test_segmentation_invalid_label_to_id():
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={})
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id=[])
def test_segmentation_invalid_default_label():
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}, default_label='building')
def test_segmentation_serialize_invalid_type():
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}).serialize(True)
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}).serialize([])
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}).serialize('data:image/jpeg;base64,')
def test_segmentation_deserialize_invalid_type():
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}).deserialize(True)
with pytest.raises(InvalidArgumentError):
segmentation(label_to_id={"background": 0, "person": 1}).deserialize('data:image/jpeg;base64,')
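# --- Minimal round-trip sketch (illustrative only) using the data types exercised above ---
# It relies solely on serialize()/deserialize() behavior demonstrated by the tests in this file.
prompt_type = text(default='hello', max_length=64)
steps_type = number(default=10, min=1, max=100)
mode_type = category(choices=['fast', 'accurate'], default='fast')
latent_type = vector(length=4)
assert prompt_type.serialize(123) == '123'            # non-strings are coerced to str
assert steps_type.deserialize(42) == 42
assert mode_type.deserialize('accurate') == 'accurate'
assert isinstance(latent_type.deserialize(np.zeros(4)), np.ndarray)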
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# batsky documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 25 18:58:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
import os
import re
import datetime
HERE = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BatSky'
copyright = u'2019, Olivier Richard'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
version_file = os.path.join(HERE, os.pardir,
'batsky', '__init__.py')
release = re.search("__version__ = '([^']+)'",
open(version_file).read().strip()).group(1)
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
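# For example, with "__version__ = '1.4.2'" in batsky/__init__.py, release becomes '1.4.2'
# and the short X.Y version above becomes '1.4'.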
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'batsky_doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'batsky.tex',
u'BatSky Documentation',
u'Olivier Richard', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'batsky',
u'BatSky Documentation',
[u'Olivier Richard'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'batsky',
u'BatSky Documentation',
u'Olivier Richard',
'batsky',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
#!/usr/bin/python3
'''Advent of Code 2018 Day 4 tests'''
import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from aoc2018 import day04 # pylint: disable=wrong-import-position
class TestUM(unittest.TestCase):
'''Unit Tests'''
    def test_day04(self) -> None:
        '''Test parts 1 and 2'''
with open('tests/day04test.txt', 'r') as f:
inputs = day04.readinputdata(f)
self.assertEqual(day04.runsolution(inputs), (240, 4455))
|
from rest_framework import serializers
from todo.models import Todo
class TodoSerializer(serializers.ModelSerializer):
class Meta:
model = Todo
fields = ('id', 'title', 'complete', 'created')
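# Usage sketch (assumes an existing Todo instance named todo):
#   serializer = TodoSerializer(todo)
#   serializer.data  # -> {'id': ..., 'title': ..., 'complete': ..., 'created': ...}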
# from django.contrib.auth.models import User, Group
#
# class UserSerializer(serializers.ModelSerializer):
# class Meta:
# model = User
# fields = ('id', 'username', 'email', 'groups')
#
#
# class GroupSerializer(serializers.ModelSerializer):
# class Meta:
# model = Group
# fields = ('id', 'name')
|
import requests
import time
import datetime
from utils import read_configuration_file
from login_bot import login_bot
from vote_bot import VoteBot
if __name__ == "__main__":
config = read_configuration_file()
username = config['credentials']['username']
password = config['credentials']['password']
participant = config['participant']
session = login_bot(username, password)
started_time = datetime.datetime.now()
bot = VoteBot(session, participant)
while True:
try:
bot.start_session()
bot.generate_captcha()
try:
bot.captcha_verification()
except Exception as e:
                print('The captcha needs to be added to the image database.')
continue
bot.vote()
print(f'Started time: {started_time}. Now: {datetime.datetime.now()}')
except requests.exceptions.ConnectionError as e:
            print("[-] Globo server overloaded... Trying again in 1 minute.")
time.sleep(60)
|
# -*- coding: utf-8 -*-
from ctypes import c_int, c_void_p
from functools import cached_property
import numpy as np
from pyfr.backends.base import BaseBackend
from pyfr.backends.openmp.compiler import OpenMPCompiler
class OpenMPBackend(BaseBackend):
name = 'openmp'
blocks = True
def __init__(self, cfg):
super().__init__(cfg)
# Take the default alignment requirement to be 64-bytes
self.alignb = cfg.getint('backend-openmp', 'alignb', 64)
if self.alignb < 32 or (self.alignb & (self.alignb - 1)):
raise ValueError('Alignment must be a power of 2 and >= 32')
# Compute the SoA and AoSoA size
self.soasz = self.alignb // np.dtype(self.fpdtype).itemsize
self.csubsz = self.soasz*cfg.getint('backend-openmp', 'n-soa', 1)
# C source compiler
self.compiler = OpenMPCompiler(cfg)
from pyfr.backends.openmp import (blasext, packing, provider, types,
xsmm)
# Register our data types and meta kernels
self.base_matrix_cls = types.OpenMPMatrixBase
self.const_matrix_cls = types.OpenMPConstMatrix
self.graph_cls = types.OpenMPGraph
self.matrix_cls = types.OpenMPMatrix
self.matrix_slice_cls = types.OpenMPMatrixSlice
self.view_cls = types.OpenMPView
self.xchg_matrix_cls = types.OpenMPXchgMatrix
self.xchg_view_cls = types.OpenMPXchgView
self.ordered_meta_kernel_cls = provider.OpenMPOrderedMetaKernel
self.unordered_meta_kernel_cls = provider.OpenMPUnorderedMetaKernel
# Instantiate mandatory kernel provider classes
kprovcls = [provider.OpenMPPointwiseKernelProvider,
blasext.OpenMPBlasExtKernels,
packing.OpenMPPackingKernels,
xsmm.OpenMPXSMMKernels]
self._providers = [k(self) for k in kprovcls]
# Pointwise kernels
self.pointwise = self._providers[0]
def run_kernels(self, kernels, wait=False):
for k in kernels:
k.run()
def run_graph(self, graph, wait=False):
graph.run()
@cached_property
def krunner(self):
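        # Render and compile the C run-kernels stub once; cached_property
        # memoises the resulting callable for later calls.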
ksrc = self.lookup.get_template('run-kernels').render()
klib = self.compiler.build(ksrc)
return klib.function('run_kernels', None, [c_int, c_int, c_void_p])
def _malloc_impl(self, nbytes):
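        # Over-allocate by alignb bytes, then slice from the first aligned
        # offset so the returned buffer starts on an alignb-byte boundary.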
data = np.zeros(nbytes + self.alignb, dtype=np.uint8)
offset = -data.ctypes.data % self.alignb
return data[offset:nbytes + offset]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import re
__all__ = ['HTTP', 'redirect']
defined_status = {
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
422: 'UNPROCESSABLE ENTITY',
429: 'TOO MANY REQUESTS',
451: 'UNAVAILABLE FOR LEGAL REASONS', # http://www.451unavailable.org/
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
509: 'BANDWIDTH LIMIT EXCEEDED',
}
regex_status = re.compile(r'^\d{3} [0-9A-Z ]+$')
class HTTP(Exception):
def __init__(
self,
status,
body='',
cookies=None,
**headers
):
self.status = status
self.body = body
self.headers = headers
self.cookies2headers(cookies)
def cookies2headers(self, cookies):
if cookies and len(cookies) > 0:
self.headers['Set-Cookie'] = [
str(cookie)[11:] for cookie in cookies.values()]
def to(self, responder, env=None):
env = env or {}
status = self.status
headers = self.headers
if status in defined_status:
status = '%d %s' % (status, defined_status[status])
elif isinstance(status, int):
status = '%d UNKNOWN ERROR' % status
else:
status = str(status)
if not regex_status.match(status):
status = '500 %s' % (defined_status[500])
headers.setdefault('Content-Type', 'text/html; charset=UTF-8')
body = self.body
if status[:1] == '4':
if not body:
body = status
if isinstance(body, str):
headers['Content-Length'] = len(body)
rheaders = []
        for k, v in headers.items():
            if isinstance(v, list):
                rheaders += [(k, str(item)) for item in v]
            elif v is not None:
rheaders.append((k, str(v)))
responder(status, rheaders)
if env.get('request_method', '') == 'HEAD':
return ['']
elif isinstance(body, str):
return [body]
elif hasattr(body, '__iter__'):
return body
else:
return [str(body)]
@property
def message(self):
"""
compose a message describing this exception
"status defined_status [web2py_error]"
message elements that are not defined are omitted
"""
msg = '%(status)s'
if self.status in defined_status:
msg = '%(status)s %(defined_status)s'
if 'web2py_error' in self.headers:
msg += ' [%(web2py_error)s]'
return msg % dict(
status=self.status,
defined_status=defined_status.get(self.status),
web2py_error=self.headers.get('web2py_error'))
def __str__(self):
"stringify me"
return self.message
def redirect(location='', how=303, client_side=False):
if location:
from gluon import current
loc = location.replace('\r', '%0D').replace('\n', '%0A')
if client_side and current.request.ajax:
raise HTTP(200, **{'web2py-redirect-location': loc})
else:
raise HTTP(how,
'You are being redirected <a href="%s">here</a>' % loc,
Location=loc)
else:
from gluon import current
if client_side and current.request.ajax:
raise HTTP(200, **{'web2py-component-command': 'window.location.reload(true)'})
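# Usage sketch (assumption: called from inside a web2py action):
#   def download():
#       if not found:
#           raise HTTP(404, 'Item not found')   # rendered via HTTP.to(...)
#       redirect(URL('default', 'index'))       # raises HTTP(303) internally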
|
import unittest
import os
import requests_mock
import tableauserverclient as TSC
import xml.etree.ElementTree as ET
from tableauserverclient.datetime_helpers import format_datetime
from tableauserverclient.server.endpoint.exceptions import InternalServerError
from tableauserverclient.server.request_factory import RequestFactory
from tableauserverclient.models.permissions_item import PermissionsRule
from tableauserverclient.models.user_item import UserItem
from tableauserverclient.models.group_item import GroupItem
from ._utils import asset
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_add_tags.xml')
GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml')
GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_empty.xml')
GET_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get.xml')
POPULATE_CONNECTIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_connections.xml')
POPULATE_PDF = os.path.join(TEST_ASSET_DIR, 'populate_pdf.pdf')
POPULATE_PERMISSIONS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_permissions.xml')
POPULATE_PREVIEW_IMAGE = os.path.join(TEST_ASSET_DIR, 'RESTAPISample Image.png')
POPULATE_VIEWS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views.xml')
POPULATE_VIEWS_USAGE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_populate_views_usage.xml')
PUBLISH_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish.xml')
PUBLISH_ASYNC_XML = os.path.join(TEST_ASSET_DIR, 'workbook_publish_async.xml')
UPDATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_update.xml')
UPDATE_PERMISSIONS = os.path.join(TEST_ASSET_DIR, 'workbook_update_permissions.xml')
class WorkbookTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
# Fake sign in
self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'
self.baseurl = self.server.workbooks.baseurl
def test_get(self):
with open(GET_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(2, pagination_item.total_available)
self.assertEqual('6d13b0ca-043d-4d42-8c9d-3f3313ea3a00', all_workbooks[0].id)
self.assertEqual('Superstore', all_workbooks[0].name)
self.assertEqual('Superstore', all_workbooks[0].content_url)
self.assertEqual(False, all_workbooks[0].show_tabs)
self.assertEqual(1, all_workbooks[0].size)
self.assertEqual('2016-08-03T20:34:04Z', format_datetime(all_workbooks[0].created_at))
self.assertEqual('2016-08-04T17:56:41Z', format_datetime(all_workbooks[0].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[0].project_id)
self.assertEqual('default', all_workbooks[0].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[0].owner_id)
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', all_workbooks[1].id)
self.assertEqual('SafariSample', all_workbooks[1].name)
self.assertEqual('SafariSample', all_workbooks[1].content_url)
self.assertEqual(False, all_workbooks[1].show_tabs)
self.assertEqual(26, all_workbooks[1].size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(all_workbooks[1].created_at))
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(all_workbooks[1].updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', all_workbooks[1].project_id)
self.assertEqual('default', all_workbooks[1].project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', all_workbooks[1].owner_id)
self.assertEqual(set(['Safari', 'Sample']), all_workbooks[1].tags)
def test_get_before_signin(self):
self.server._auth_token = None
self.assertRaises(TSC.NotSignedInError, self.server.workbooks.get)
def test_get_empty(self):
with open(GET_EMPTY_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl, text=response_xml)
all_workbooks, pagination_item = self.server.workbooks.get()
self.assertEqual(0, pagination_item.total_available)
self.assertEqual([], all_workbooks)
def test_get_by_id(self):
with open(GET_BY_ID_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', text=response_xml)
single_workbook = self.server.workbooks.get_by_id('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d42', single_workbook.id)
self.assertEqual('SafariSample', single_workbook.name)
self.assertEqual('SafariSample', single_workbook.content_url)
self.assertEqual(False, single_workbook.show_tabs)
self.assertEqual(26, single_workbook.size)
self.assertEqual('2016-07-26T20:34:56Z', format_datetime(single_workbook.created_at))
self.assertEqual('2016-07-26T20:35:05Z', format_datetime(single_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_workbook.project_id)
self.assertEqual('default', single_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_workbook.owner_id)
self.assertEqual(set(['Safari', 'Sample']), single_workbook.tags)
self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', single_workbook.views[0].id)
self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name)
self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url)
def test_get_by_id_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.get_by_id, '')
def test_delete(self):
with requests_mock.mock() as m:
m.delete(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d42', status_code=204)
self.server.workbooks.delete('3cc6cd06-89ce-4fdc-b935-5294135d6d42')
def test_delete_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.delete, '')
def test_update(self):
with open(UPDATE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=response_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74', show_tabs=True)
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook.owner_id = 'dd2239f6-ddf1-4107-981a-4cf94e415794'
single_workbook.name = 'renamedWorkbook'
single_workbook.materialized_views_config = {'materialized_views_enabled': True,
'run_materialization_now': False}
single_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual('1f951daf-4061-451a-9df1-69a8062664f2', single_workbook.id)
self.assertEqual(True, single_workbook.show_tabs)
self.assertEqual('1d0304cd-3796-429f-b815-7258370b9b74', single_workbook.project_id)
self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', single_workbook.owner_id)
self.assertEqual('renamedWorkbook', single_workbook.name)
self.assertEqual(True, single_workbook.materialized_views_config['materialized_views_enabled'])
self.assertEqual(False, single_workbook.materialized_views_config['run_materialization_now'])
def test_update_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.update, single_workbook)
def test_update_copy_fields(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
connection_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=connection_xml)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook._connections, updated_workbook._connections)
self.assertEqual(single_workbook._views, updated_workbook._views)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
self.assertEqual(single_workbook._preview_image, updated_workbook._preview_image)
def test_update_tags(self):
with open(ADD_TAGS_XML, 'rb') as f:
add_tags_xml = f.read().decode('utf-8')
with open(UPDATE_XML, 'rb') as f:
update_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags', text=add_tags_xml)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/b', status_code=204)
m.delete(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/tags/d', status_code=204)
m.put(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2', text=update_xml)
single_workbook = TSC.WorkbookItem('1d0304cd-3796-429f-b815-7258370b9b74')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
single_workbook._initial_tags.update(['a', 'b', 'c', 'd'])
single_workbook.tags.update(['a', 'c', 'e'])
updated_workbook = self.server.workbooks.update(single_workbook)
self.assertEqual(single_workbook.tags, updated_workbook.tags)
self.assertEqual(single_workbook._initial_tags, updated_workbook._initial_tags)
def test_download(self):
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_sanitizes_name(self):
filename = "Name,With,Commas.twbx"
disposition = 'name="tableau_workbook"; filename="{}"'.format(filename)
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content',
headers={'Content-Disposition': disposition})
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2')
self.assertEqual(os.path.basename(file_path), "NameWithCommas.twbx")
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_extract_only(self):
# Pretend we're 2.5 for 'extract_only'
self.server.version = "2.5"
self.baseurl = self.server.workbooks.baseurl
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/content?includeExtract=False',
headers={'Content-Disposition': 'name="tableau_workbook"; filename="RESTAPISample.twbx"'},
complete_qs=True)
# Technically this shouldn't download a twbx, but we are interested in the qs, not the file
file_path = self.server.workbooks.download('1f951daf-4061-451a-9df1-69a8062664f2', include_extract=False)
self.assertTrue(os.path.exists(file_path))
os.remove(file_path)
def test_download_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.download, '')
def test_populate_views(self):
with open(POPULATE_VIEWS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual('GDP per capita', views_list[0].name)
self.assertEqual('RESTAPISample/sheets/GDPpercapita', views_list[0].content_url)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual('Country ranks', views_list[1].name)
self.assertEqual('RESTAPISample/sheets/Countryranks', views_list[1].content_url)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual('Interest rates', views_list[2].name)
self.assertEqual('RESTAPISample/sheets/Interestrates', views_list[2].content_url)
def test_populate_views_with_usage(self):
with open(POPULATE_VIEWS_USAGE_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/views?includeUsageStatistics=true',
text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_views(single_workbook, usage=True)
views_list = single_workbook.views
self.assertEqual('097dbe13-de89-445f-b2c3-02f28bd010c1', views_list[0].id)
self.assertEqual(2, views_list[0].total_views)
self.assertEqual('2c1ab9d7-8d64-4cc6-b495-52e40c60c330', views_list[1].id)
self.assertEqual(37, views_list[1].total_views)
self.assertEqual('0599c28c-6d82-457e-a453-e52c1bdb00f5', views_list[2].id)
self.assertEqual(0, views_list[2].total_views)
def test_populate_views_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError, self.server.workbooks.populate_views, single_workbook)
def test_populate_connections(self):
with open(POPULATE_CONNECTIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/connections', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_connections(single_workbook)
self.assertEqual('37ca6ced-58d7-4dcf-99dc-f0a85223cbef', single_workbook.connections[0].id)
self.assertEqual('dataengine', single_workbook.connections[0].connection_type)
self.assertEqual('4506225a-0d32-4ab1-82d3-c24e85f7afba', single_workbook.connections[0].datasource_id)
self.assertEqual('World Indicators', single_workbook.connections[0].datasource_name)
def test_populate_permissions(self):
with open(POPULATE_PERMISSIONS_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.get(self.baseurl + '/21778de4-b7b9-44bc-a599-1506a2639ace/permissions', text=response_xml)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
self.server.workbooks.populate_permissions(single_workbook)
permissions = single_workbook.permissions
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.WebAuthoring: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Filter: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.AddComment: TSC.Permission.Mode.Allow
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.ExportImage: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ShareView: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.ExportData: TSC.Permission.Mode.Deny,
TSC.Permission.Capability.ViewComments: TSC.Permission.Mode.Deny
})
def test_add_permissions(self):
with open(UPDATE_PERMISSIONS, 'rb') as f:
response_xml = f.read().decode('utf-8')
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '21778de4-b7b9-44bc-a599-1506a2639ace'
bob = UserItem.as_reference("7c37ee24-c4b1-42b6-a154-eaeab7ee330a")
group_of_people = GroupItem.as_reference("5e5e1978-71fa-11e4-87dd-7382f5c437af")
new_permissions = [
PermissionsRule(bob, {'Write': 'Allow'}),
PermissionsRule(group_of_people, {'Read': 'Deny'})
]
with requests_mock.mock() as m:
m.put(self.baseurl + "/21778de4-b7b9-44bc-a599-1506a2639ace/permissions", text=response_xml)
permissions = self.server.workbooks.update_permissions(single_workbook, new_permissions)
self.assertEqual(permissions[0].grantee.tag_name, 'group')
self.assertEqual(permissions[0].grantee.id, '5e5e1978-71fa-11e4-87dd-7382f5c437af')
self.assertDictEqual(permissions[0].capabilities, {
TSC.Permission.Capability.Read: TSC.Permission.Mode.Deny
})
self.assertEqual(permissions[1].grantee.tag_name, 'user')
self.assertEqual(permissions[1].grantee.id, '7c37ee24-c4b1-42b6-a154-eaeab7ee330a')
self.assertDictEqual(permissions[1].capabilities, {
TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow
})
def test_populate_connections_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_connections,
single_workbook)
def test_populate_pdf(self):
self.server.version = "3.4"
self.baseurl = self.server.workbooks.baseurl
with open(POPULATE_PDF, "rb") as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + "/1f951daf-4061-451a-9df1-69a8062664f2/pdf?type=a5&orientation=landscape",
content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
            page_type = TSC.PDFRequestOptions.PageType.A5
            orientation = TSC.PDFRequestOptions.Orientation.Landscape
            req_option = TSC.PDFRequestOptions(page_type, orientation)
self.server.workbooks.populate_pdf(single_workbook, req_option)
self.assertEqual(response, single_workbook.pdf)
def test_populate_preview_image(self):
with open(POPULATE_PREVIEW_IMAGE, 'rb') as f:
response = f.read()
with requests_mock.mock() as m:
m.get(self.baseurl + '/1f951daf-4061-451a-9df1-69a8062664f2/previewImage', content=response)
single_workbook = TSC.WorkbookItem('test')
single_workbook._id = '1f951daf-4061-451a-9df1-69a8062664f2'
self.server.workbooks.populate_preview_image(single_workbook)
self.assertEqual(response, single_workbook.preview_image)
def test_populate_preview_image_missing_id(self):
single_workbook = TSC.WorkbookItem('test')
self.assertRaises(TSC.MissingRequiredFieldError,
self.server.workbooks.populate_preview_image,
single_workbook)
def test_publish(self):
with open(PUBLISH_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_workbook = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode)
self.assertEqual('a8076ca1-e9d8-495e-bae6-c684dbb55836', new_workbook.id)
self.assertEqual('RESTAPISample', new_workbook.name)
self.assertEqual('RESTAPISample_0', new_workbook.content_url)
self.assertEqual(False, new_workbook.show_tabs)
self.assertEqual(1, new_workbook.size)
self.assertEqual('2016-08-18T18:33:24Z', format_datetime(new_workbook.created_at))
self.assertEqual('2016-08-18T20:31:34Z', format_datetime(new_workbook.updated_at))
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', new_workbook.project_id)
self.assertEqual('default', new_workbook.project_name)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', new_workbook.owner_id)
self.assertEqual('fe0b4e89-73f4-435e-952d-3a263fbfa56c', new_workbook.views[0].id)
self.assertEqual('GDP per capita', new_workbook.views[0].name)
self.assertEqual('RESTAPISample_0/sheets/GDPpercapita', new_workbook.views[0].content_url)
def test_publish_async(self):
with open(PUBLISH_ASYNC_XML, 'rb') as f:
response_xml = f.read().decode('utf-8')
with requests_mock.mock() as m:
m.post(self.baseurl, text=response_xml)
new_workbook = TSC.WorkbookItem(name='Sample',
show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
sample_workbook = os.path.join(TEST_ASSET_DIR, 'SampleWB.twbx')
publish_mode = self.server.PublishMode.CreateNew
new_job = self.server.workbooks.publish(new_workbook,
sample_workbook,
publish_mode,
as_job=True)
self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id)
self.assertEqual('PublishWorkbook', new_job.type)
self.assertEqual('0', new_job.progress)
self.assertEqual('2018-06-29T23:22:32Z', format_datetime(new_job.created_at))
self.assertEqual('1', new_job.finish_code)
def test_publish_invalid_file(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(IOError, self.server.workbooks.publish, new_workbook, '.',
self.server.PublishMode.CreateNew)
def test_publish_invalid_file_type(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
self.assertRaises(ValueError, self.server.workbooks.publish,
new_workbook, os.path.join(TEST_ASSET_DIR, 'SampleDS.tds'),
self.server.PublishMode.CreateNew)
def test_publish_multi_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
connection2 = TSC.ConnectionItem()
connection2.server_address = 'pgsql.test.com'
connection2.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connections=[connection1, connection2])
# Can't use ConnectionItem parser due to xml namespace problems
connection_results = ET.fromstring(response).findall('.//connection')
self.assertEqual(connection_results[0].get('serverAddress', None), 'mysql.test.com')
self.assertEqual(connection_results[0].find('connectionCredentials').get('name', None), 'test')
self.assertEqual(connection_results[1].get('serverAddress', None), 'pgsql.test.com')
self.assertEqual(connection_results[1].find('connectionCredentials').get('password', None), 'secret')
def test_publish_single_connection(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
response = RequestFactory.Workbook._generate_xml(new_workbook, connection_credentials=connection_creds)
# Can't use ConnectionItem parser due to xml namespace problems
credentials = ET.fromstring(response).findall('.//connectionCredentials')
self.assertEqual(len(credentials), 1)
self.assertEqual(credentials[0].get('name', None), 'test')
self.assertEqual(credentials[0].get('password', None), 'secret')
self.assertEqual(credentials[0].get('embed', None), 'true')
def test_credentials_and_multi_connect_raises_exception(self):
new_workbook = TSC.WorkbookItem(name='Sample', show_tabs=False,
project_id='ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
connection_creds = TSC.ConnectionCredentials('test', 'secret', True)
connection1 = TSC.ConnectionItem()
connection1.server_address = 'mysql.test.com'
connection1.connection_credentials = TSC.ConnectionCredentials('test', 'secret', True)
with self.assertRaises(RuntimeError):
response = RequestFactory.Workbook._generate_xml(new_workbook,
connection_credentials=connection_creds,
connections=[connection1])
def test_synchronous_publish_timeout_error(self):
with requests_mock.mock() as m:
m.register_uri('POST', self.baseurl, status_code=504)
new_workbook = TSC.WorkbookItem(project_id='')
publish_mode = self.server.PublishMode.CreateNew
            self.assertRaisesRegex(InternalServerError, 'Please use asynchronous publishing to avoid timeouts',
                                   self.server.workbooks.publish, new_workbook, asset('SampleWB.twbx'), publish_mode)
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""Return the recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""Create a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name="Cinnamon"):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 5
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated API requests"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authenticated API requests"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client.force_authenticate(self.user)
    def test_retrieve_recipes(self):
        """Test retrieving a list of recipes"""
        # We're testing that we can retrieve the recipes from the database,
        # which is why we don't need to access them here.
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
"other@test.com",
"password123"
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate Cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test create recipe with tags"""
tag1 = sample_tag(user=self.user, name='vegan')
tag2 = sample_tag(user=self.user, name='dessert')
payload = {
            'title': 'Avocado Lime Cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
# DEBUG: res.status_code returns 400 not 201
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai Prawn Red Curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""test updating a recipe with PATCH method"""
# NOTE: update functionality is available out of the box with
# django rest framework. Hence, you shouldn't really need
# to write tests for it (the Django devs should do that),
# but this is here as an illustrative example.
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken Tikka', 'tags': [new_tag.id]}
# our helper function for returning the detail recipe url
url = detail_url(recipe.id)
# call PATCH method to client
self.client.patch(url, payload)
# refresh the model object with new data
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
"""test full update of recipe"""
# NOTE: update functionality is available out of the box with
# django rest framework. Hence, you shouldn't really need
# to write tests for it (the Django devs should do that),
# but this is here as an illustrative example.
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
            'title': 'Spaghetti Carbonara',
'time_minutes': 20,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
# refresh our recipe model object
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
# assert that the recipe contains no tags, since we're using
# PUT method, and not providing new tags, there should be none.
self.assertEqual(len(tags), 0)
|
# %%
#######################################
def new_objects_from_csv(csv_file: str):
"""For each row in the .csv, returns a list of custom objects with properties/attributes corresponding to the header names of each column.
Example:
>>> results = new_objects_from_csv('brandnew.csv')\n
>>> pprint(results)\n
[CustObj(NAME='bob', AGE='21', JOB=' janitor', DEPARTMENT=' sanitization team', PAY='2'),\n
CustObj(NAME='alice', AGE='22', JOB=' secretary', DEPARTMENT=' admin team', PAY='3'),\n
CustObj(NAME='chuck', AGE='23', JOB=' plumber', DEPARTMENT=' construction team', PAY='4')]\n
Reference:
# I retrieved the full body of the code below from here; this article has some great info
https://realpython.com/python-namedtuple/\n
Args:
csv_file (str): Reference an existing .csv.
Returns:
object: Returns a custom object.
"""
import csv
from collections import namedtuple
results_list = []
with open(csv_file, 'r') as f:
reader = csv.reader(f)
CustObj = namedtuple('CustObj', next(reader), rename=True)
for row in reader:
myobj = CustObj(*row)
results_list.append(myobj)
return results_list
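# Usage sketch (assumes 'brandnew.csv' exists and its first row is a header):
#   rows = new_objects_from_csv('brandnew.csv')
#   print(rows[0].NAME)  # attributes are named after the header cells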
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Progress Bars
=============
Use or override one of the progress bar callbacks.
"""
from pytorch_lightning.callbacks.progress.base import ProgressBarBase # noqa: F401
from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBar # noqa: F401
from pytorch_lightning.callbacks.progress.tqdm_progress import TQDMProgressBar # noqa: F401
|
import re
from phising.blob_storage_operations.blob_operations import Blob_Operation
from utils.logger import App_Logger
from utils.read_params import read_params
class Raw_Train_Data_Validation:
"""
    Description : This class is used for validating the raw training data
    Version : 1.2
    Revisions : moved setup to cloud
"""
def __init__(self, raw_data_container_name):
self.config = read_params()
self.db_name = self.config["db_log"]["train"]
self.raw_data_container_name = raw_data_container_name
self.log_writer = App_Logger()
self.class_name = self.__class__.__name__
self.blob = Blob_Operation()
self.train_data_container = self.config["container"]["train_data"]
self.input_files_container = self.config["container"]["input_files"]
self.raw_train_data_dir = self.config["data"]["raw_data"]["train"]
self.train_schema_file = self.config["schema_file"]["train"]
self.regex_file = self.config["regex_file"]
self.train_schema_log = self.config["train_db_log"]["values_from_schema"]
self.good_train_data_dir = self.config["data"]["train"]["good"]
self.bad_train_data_dir = self.config["data"]["train"]["bad"]
self.train_gen_log = self.config["train_db_log"]["general"]
self.train_name_valid_log = self.config["train_db_log"]["name_validation"]
self.train_col_valid_log = self.config["train_db_log"]["col_validation"]
self.train_missing_value_log = self.config["train_db_log"][
"missing_values_in_col"
]
def values_from_schema(self):
"""
Method Name : values_from_schema
Description : This method is used for getting values from schema_training.json
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.values_from_schema.__name__
try:
self.log_writer.start_log(
key="start",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_schema_log,
)
dic = self.blob.read_json(
file_name=self.train_schema_file,
container_name=self.input_files_container,
db_name=self.db_name,
collection_name=self.train_schema_log,
)
LengthOfDateStampInFile = dic["LengthOfDateStampInFile"]
LengthOfTimeStampInFile = dic["LengthOfTimeStampInFile"]
column_names = dic["ColName"]
NumberofColumns = dic["NumberofColumns"]
message = (
"LengthOfDateStampInFile: %s" % LengthOfDateStampInFile
+ "\t"
+ "LengthOfTimeStampInFile: %s" % LengthOfTimeStampInFile
+ "\t "
+ "NumberofColumns: %s" % NumberofColumns
+ "\n"
)
self.log_writer.log(
db_name=self.db_name,
collection_name=self.train_schema_log,
log_info=message,
)
self.log_writer.start_log(
key="exit",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_schema_log,
)
            return (
                LengthOfDateStampInFile,
                LengthOfTimeStampInFile,
                column_names,
                NumberofColumns,
            )
        except Exception as e:
            self.log_writer.exception_log(
                error=e,
                class_name=self.class_name,
                method_name=method_name,
                db_name=self.db_name,
                collection_name=self.train_schema_log,
            )
def get_regex_pattern(self):
"""
Method Name : get_regex_pattern
Description : This method is used for getting regex pattern for file validation
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.get_regex_pattern.__name__
try:
self.log_writer.start_log(
key="start",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_gen_log,
)
regex = self.blob.read_text(
file_name=self.regex_file,
container_name=self.input_files_container,
db_name=self.db_name,
collection_name=self.train_gen_log,
)
self.log_writer.log(
db_name=self.db_name,
collection_name=self.train_gen_log,
log_info=f"Got {regex} pattern",
)
self.log_writer.start_log(
key="exit",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_gen_log,
)
return regex
except Exception as e:
self.log_writer.exception_log(
error=e,
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_gen_log,
)
def validate_raw_file_name(
self, regex, LengthOfDateStampInFile, LengthOfTimeStampInFile
):
"""
Method Name : validate_raw_file_name
Description : This method is used for validating raw file name based on the regex pattern
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.validate_raw_file_name.__name__
self.log_writer.start_log(
key="start",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
try:
onlyfiles = self.blob.get_files_from_folder(
folder_name=self.raw_train_data_dir,
container_name=self.raw_data_container_name,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
train_batch_files = [f.split("/")[1] for f in onlyfiles]
self.log_writer.log(
db_name=self.db_name,
collection_name=self.train_name_valid_log,
log_info="Got training files with exact name",
)
for filename in train_batch_files:
raw_data_train_filename = self.raw_train_data_dir + "/" + filename
good_data_train_filename = self.good_train_data_dir + "/" + filename
bad_data_train_filename = self.bad_train_data_dir + "/" + filename
self.log_writer.log(
db_name=self.db_name,
collection_name=self.train_name_valid_log,
log_info="Created raw,good and bad data filenames",
)
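                # Expected file name shape: <name>_<datestamp>_<timestamp>.csv;
                # strip the extension, then split on '_' to check stamp lengths.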
if re.match(regex, filename):
splitAtDot = re.split(".csv", filename)
splitAtDot = re.split("_", splitAtDot[0])
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
self.blob.copy_data(
from_file_name=raw_data_train_filename,
from_container_name=self.train_data_container,
to_file_name=good_data_train_filename,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
else:
self.blob.copy_data(
from_file_name=raw_data_train_filename,
from_container_name=self.raw_data_container_name,
to_file_name=bad_data_train_filename,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
else:
self.blob.copy_data(
from_file_name=raw_data_train_filename,
from_container_name=self.raw_data_container_name,
to_file_name=bad_data_train_filename,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
else:
self.blob.copy_data(
from_file_name=raw_data_train_filename,
from_container_name=self.train_data_container,
to_file_name=bad_data_train_filename,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
self.log_writer.start_log(
key="exit",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
except Exception as e:
self.log_writer.exception_log(
error=e,
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_name_valid_log,
)
def validate_col_length(self, NumberofColumns):
"""
Method Name : validate_col_length
Description : This method is used for validating the column length of the csv file
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.validate_col_length.__name__
self.log_writer.start_log(
key="start",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_col_valid_log,
)
try:
lst = self.blob.read_csv_from_folder(
folder_name=self.good_train_data_dir,
container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_col_valid_log,
)
for f in lst:
df = f[0]
file = f[1]
abs_f = f[2]
if file.endswith(".csv"):
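                    # Files with the expected column count stay in the good
                    # folder; any other width is moved to the bad-data folder.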
if df.shape[1] == NumberofColumns:
pass
else:
dest_f = self.bad_train_data_dir + "/" + abs_f
self.blob.move_data(
from_file_name=file,
from_container_name=self.train_data_container,
to_file_name=dest_f,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_col_valid_log,
)
else:
pass
self.log_writer.start_log(
key="exit",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_col_valid_log,
)
except Exception as e:
self.log_writer.exception_log(
error=e,
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_col_valid_log,
)
def validate_missing_values_in_col(self):
"""
Method Name : validate_missing_values_in_col
Description : This method is used for validating the missing values in columns
Version : 1.2
Revisions : moved setup to cloud
"""
method_name = self.validate_missing_values_in_col.__name__
self.log_writer.start_log(
key="start",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
try:
lst = self.blob.read_csv_from_folder(
folder_name=self.good_train_data_dir,
container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
for f in lst:
df = f[0]
file = f[1]
abs_f = f[2]
if abs_f.endswith(".csv"):
count = 0
for cols in df:
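                        # df[cols].count() excludes NaNs, so this matches only
                        # columns where every single value is missing.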
if (len(df[cols]) - df[cols].count()) == len(df[cols]):
count += 1
dest_f = self.bad_train_data_dir + "/" + abs_f
self.blob.move_data(
from_file_name=file,
from_container_name=self.train_data_container,
to_file_name=dest_f,
to_container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
break
if count == 0:
dest_f = self.good_train_data_dir + "/" + abs_f
self.blob.upload_df_as_csv(
dataframe=df,
local_file_name=abs_f,
container_file_name=dest_f,
container_name=self.train_data_container,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
else:
pass
self.log_writer.start_log(
key="exit",
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
except Exception as e:
self.log_writer.exception_log(
error=e,
class_name=self.class_name,
method_name=method_name,
db_name=self.db_name,
collection_name=self.train_missing_value_log,
)
|
#!/usr/bin/env python
#
# Copyright, Michael Vittrup Larsen
# Origin: https://github.com/MichaelVL/oidc-oauth2-workshop
import os
import flask
from flask_cors import CORS
import datetime
import json
import uuid
import urllib
import base64
import hashlib
import logging
from authlib.jose import jwt, jwk, JsonWebKey
app = flask.Flask('oauth2-server')
CORS(app, resources={r"/*": {"origins": "*"}})
auth_context = dict()
code_metadata = dict()
sessions = dict()
jwt_key = os.getenv('JWT_KEY', 'jwt-key')
app_port = int(os.getenv('APP_PORT', '5001'))
own_base_url = os.getenv('APP_BASE_URL', 'http://127.0.0.1:5001')
api_base_url = os.getenv('API_BASE_URL', 'http://127.0.0.1:5002/api')
SESSION_COOKIE_NAME='session'
cfg_access_token_lifetime = int(os.getenv('ACCESS_TOKEN_LIFETIME', '1200'))
cfg_refresh_token_lifetime = int(os.getenv('REFRESH_TOKEN_LIFETIME', '3600'))
logging.basicConfig()
log = logging.getLogger('oauth2-server')
log.setLevel(logging.DEBUG)
with open(jwt_key, 'rb') as f:
key_data = f.read()
signing_key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
with open(jwt_key+'.pub', 'rb') as f:
key_data = f.read()
signing_key_pub = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
signing_key_pub['kid'] = signing_key_pub.thumbprint()
def get_session_by_subject(sub):
for session_id in sessions.keys():
if sessions[session_id]['subject'] == sub:
return session_id
return None
def get_client_session_by_id(session, client_id):
for cs in session['client_sessions']:
        if cs['client_id'] == client_id:
return cs
return None
def build_url(url, **kwargs):
return '{}?{}'.format(url, urllib.parse.urlencode(kwargs))
def issue_token(subject, audience, claims, expiry):
claims['sub'] = subject
claims['iss'] = own_base_url
claims['aud'] = audience
claims['iat'] = datetime.datetime.utcnow()
claims['exp'] = expiry
header = {'alg': 'RS256', 'kid': signing_key_pub['kid'] }
token = jwt.encode(header, claims, signing_key).decode("ascii")
return token
def log_request(prefix, req):
'''Log a Flask HTTP request (header and body)'''
data = req.get_data()
log.debug('{} # {} {}'.format(prefix, req.method, req.path))
for hdr in req.headers:
log.debug('{} # {}: {}'.format(prefix, hdr[0], hdr[1]))
log.debug(prefix+' #')
for ln in data.decode("ascii").split('\n'):
log.debug('{} # {}'.format(prefix, ln))
@app.route('/', methods=['GET'])
def index():
return flask.render_template('index.html', sessions=sessions)
@app.route('/<path:text>', methods=['GET'])
def all_routes(text):
log.info("Path '{}'".format(text))
if text in ['style.css']:
return flask.Response(flask.render_template(text), mimetype='text/css')
@app.route('/logout', methods=['POST'])
def logout():
req = flask.request
session_id = req.form.get('sessionid')
log.info('Logout, session: {}'.format(session_id))
global sessions
del sessions[session_id]
resp = flask.make_response(flask.redirect(own_base_url, code=303))
return resp
@app.route('/authorize', methods=['GET', 'POST'])
def authorize():
# TODO: Validate client-id and redirection URL
req = flask.request
client_id = req.values.get('client_id')
scope = req.values.get('scope')
redirect_uri = req.values.get('redirect_uri')
state = req.values.get('state')
nonce = req.values.get('nonce')
prompt = req.values.get('prompt')
code_challenge_method = req.values.get('code_challenge_method')
code_challenge = req.values.get('code_challenge')
reqid = str(uuid.uuid4())
session_cookie = req.cookies.get(SESSION_COOKIE_NAME)
log.info('Session cookie: {}'.format(session_cookie))
if session_cookie in sessions:
log.info('This is an existing session identified by the session cookie, short-cutting login process...')
return issue_code_and_redirect(sessions[session_cookie], client_id, state, nonce)
else:
log.info('No session cookie')
if prompt == 'none':
id_token_hint = req.form.get('id_token_hint')
id_token_claims = jwt.decode(id_token_hint, signing_key_pub)
log.info('ID token hint claims: {}'.format(id_token_claims))
if own_base_url not in id_token_claims['aud']:
log.error('ID token hint not for us')
redir_url = build_url(redirect_uri, error='login_required', state=state)
response = flask.make_response(flask.redirect(redir_url, code=303))
return response
existing_session_id = get_session_by_subject(id_token_claims['sub'])
if existing_session_id:
log.info('Found existing session {}'.format(existing_session_id))
# FIXME
return issue_code_and_redirect(sessions[existing_session_id], client_id, state, nonce)
else:
# FIXME
log.info('No existing session found')
            data = urllib.parse.urlencode({'error': 'login_required'})
            headers = {'Content-type': 'application/x-www-form-urlencoded'}
            response = flask.make_response(data, 403, headers)
return response
global auth_context
auth_context[reqid] = {'scope': scope,
'client_id': client_id,
'redirect_uri': redirect_uri,
'state': state,
'nonce': nonce,
'code_challenge': code_challenge,
'code_challenge_method': code_challenge_method,
}
log.info("AUTHENTICATE: Requesting login. Scope: '{}', client-id: '{}', state: {}, using request id: {}".format(scope, client_id, state, reqid))
return flask.render_template('authenticate.html', reqid=reqid)
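# 'auth_context' holds the pending authorization request (keyed by reqid)
# while the user walks through the /login and /approve forms below.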
@app.route('/login', methods=['POST'])
def login():
req = flask.request
reqid = req.form.get('reqid')
subject = req.form.get('username')
password = req.form.get('password')
if password != 'valid':
return flask.render_template('error.html', text='Authentication error')
scope = auth_context[reqid]['scope']
client_id = auth_context[reqid]['client_id']
state = auth_context[reqid]['state']
auth_context[reqid]['subject'] = subject
log.info("LOGIN: Requesting authorization. Scope: '{}', client-id: '{}', state: {}, using request id: {}".format(scope, client_id, state, reqid))
return flask.render_template('authorize.html', client_id=client_id, scope=scope, reqid=reqid)
@app.route('/approve', methods=['POST'])
def approve():
    req = flask.request
    reqid = req.form.get('reqid')
    # TODO: Check age of request
    # Check the request id before dereferencing it, to avoid a KeyError
    if reqid not in auth_context:
        return flask.render_template('error.html', text='Unknown request ID')
    subject = auth_context[reqid]['subject']
    log.info("APPROVE: User: '{}', request id: {}".format(subject, reqid))
    if 'approve' not in req.form:
        return flask.render_template('error.html', text='Not approved')
auth_ctx = auth_context[reqid]
del auth_context[reqid] # Auth request only valid once
# TODO: validate scope is allowed for client
log.info("User: '{}' authorized scope: '{}' for client_id: '{}'".format(subject, auth_ctx['scope'], auth_ctx['client_id']))
    client_session = {'client_id': auth_ctx['client_id'],
                      'scope': auth_ctx['scope'],
                      'redirect_uri': auth_ctx['redirect_uri'],
                      'code_challenge': auth_ctx['code_challenge'],
                      'code_challenge_method': auth_ctx['code_challenge_method']}
    existing_session_id = get_session_by_subject(subject)
    if existing_session_id:
        # Reuse the subject's session, replacing any previous client session
        # for this client instead of discarding sessions for other clients
        session_id = existing_session_id
        session = sessions[session_id]
        session['client_sessions'] = [cs for cs in session['client_sessions']
                                      if cs['client_id'] != auth_ctx['client_id']]
        session['client_sessions'].append(client_session)
        log.info('Updated existing session {}'.format(session_id))
    else:
        session_id = str(uuid.uuid4())
        session = {'subject': subject,
                   'session_id': session_id,
                   'client_sessions': [client_session]}
        sessions[session_id] = session
        log.info('Created session {}'.format(session_id))
return issue_code_and_redirect(session, auth_ctx['client_id'], auth_ctx['state'], auth_ctx['nonce'])
def issue_code_and_redirect(session, client_id, state, nonce):
code = str(uuid.uuid4())
global code_metadata
code_metadata[code] = {'session_id': session['session_id'], 'client_id': client_id, 'nonce': nonce}
client_session = get_client_session_by_id(session, client_id)
redir_url = build_url(client_session['redirect_uri'], code=code, state=state)
log.info("Redirecting to callback '{}'".format(redir_url))
resp = flask.make_response(flask.redirect(redir_url, code=303))
resp.set_cookie(SESSION_COOKIE_NAME, session['session_id'], samesite='Lax', httponly=True)
return resp
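# The session cookie set above is what lets /authorize short-cut the login
# for subsequent authorization requests from the same browser.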
@app.route('/token', methods=['POST'])
def token():
req = flask.request
log_request('GET-TOKEN', req)
client_auth = req.headers.get('Authorization')
log.info("GET-TOKEN: Client auth: '{}'".format(client_auth))
# TODO: Validate client auth
grant_type = req.form.get('grant_type')
log.info("GET-TOKEN: Grant type: '{}'".format(grant_type))
if grant_type == 'authorization_code':
code = req.form.get('code')
        redir_uri = req.form.get('redirect_uri')
code_verifier = req.form.get('code_verifier')
if code not in code_metadata:
log.info("GET-TOKEN: Invalid code: '{}'".format(code))
return flask.make_response(flask.render_template('error.html', text='Invalid code'), 403)
log.info("GET-TOKEN: Valid code: '{}'".format(code))
session_id = code_metadata[code]['session_id']
client_id = code_metadata[code]['client_id']
nonce = code_metadata[code]['nonce']
del code_metadata[code] # Code can only be used once
# TODO: Validate that code is not too old
# TODO: Validate redir_uri and grant type matches code
# Context comes from session metadata
session = sessions[session_id]
subject = session['subject']
client_session = get_client_session_by_id(session, client_id)
if client_session['code_challenge']:
log.info("GET-TOKEN: Challenge '{}', verifier '{}', method '{}'".format(client_session['code_challenge'], code_verifier, client_session['code_challenge_method']))
if client_session['code_challenge_method'] == 'plain' and code_verifier != client_session['code_challenge']:
return flask.make_response('error=invalid_grant', 403)
elif client_session['code_challenge_method'] == 'S256':
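                # RFC 7636 S256: challenge = BASE64URL(SHA256(ASCII(verifier)))
                # with the trailing '=' padding removed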
digest = hashlib.sha256(code_verifier.encode('ascii')).digest()
                our_code_challenge = base64.urlsafe_b64encode(digest).decode('ascii').rstrip('=')
log.info("Self-encoded challenge '{}', got challenge '{}'".format(our_code_challenge, client_session['code_challenge']))
if our_code_challenge != client_session['code_challenge']:
return flask.make_response('error=invalid_grant', 403)
else:
return flask.make_response('error=invalid_grant', 403)
scope = client_session['scope']
access_token_lifetime = cfg_access_token_lifetime
refresh_token_lifetime = cfg_refresh_token_lifetime
elif grant_type == 'refresh_token':
refresh_token = req.form.get('refresh_token')
log.info('GET-TOKEN: Refresh token {}'.format(refresh_token))
# TODO: Validate refresh token
        # Verify against our own public signing key (loaded at startup)
        refresh_token_json = jwt.decode(refresh_token, signing_key_pub)
# Context comes from refresh token
session_id = refresh_token_json['session_id']
subject = refresh_token_json['sub']
scope = refresh_token_json['scope']
client_id = refresh_token_json['client_id']
access_token_lifetime = refresh_token_json['access_token_lifetime']
refresh_token_lifetime = refresh_token_json['refresh_token_lifetime']
nonce = refresh_token_json['nonce']
else:
log.error("GET-TOKEN: Invalid grant type: '{}'".format(grant_type))
        return flask.make_response('error=unsupported_grant_type', 400)
# Issue tokens (shared for both 'authorization_code' and 'refresh_token' grants)
log.info('GET-TOKEN: Issuing tokens!')
access_token = issue_token(subject, audience=[api_base_url, own_base_url+'/userinfo'],
claims={
'token_use': 'access',
'scope': scope},
expiry=datetime.datetime.utcnow()+datetime.timedelta(seconds=access_token_lifetime))
refresh_token = issue_token(subject, audience=own_base_url+'/token',
claims={
'client_id': client_id,
'session_id': session_id,
'access_token_lifetime': access_token_lifetime,
'refresh_token_lifetime' : refresh_token_lifetime,
'nonce': nonce,
'token_use': 'refresh',
'scope': scope},
expiry=datetime.datetime.utcnow()+datetime.timedelta(seconds=refresh_token_lifetime))
response = {'access_token': access_token, 'expires_in': access_token_lifetime,
'refresh_token': refresh_token, 'token_type': 'Bearer'}
if 'openid' in scope:
claims = dict()
        # See https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims for what claims to include in the ID token
if 'profile' in scope:
claims['name'] = 'Name of user {}'.format(subject.capitalize())
claims['preferred_username'] = subject.capitalize()
if nonce:
claims['nonce'] = nonce
response['id_token'] = issue_token(subject, [client_id, own_base_url], claims, datetime.datetime.utcnow()+datetime.timedelta(minutes=60))
return flask.Response(json.dumps(response), mimetype='application/json')
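# Illustrative sketch (our addition, not part of the provider): how a client
# could derive the PKCE S256 pair that the /token check above expects. The
# helper name and the stdlib 'secrets' module are our choices; hashlib and
# base64 are the module-level imports already used above.
def example_pkce_pair():
    import secrets
    verifier = secrets.token_urlsafe(32)
    digest = hashlib.sha256(verifier.encode('ascii')).digest()
    challenge = base64.urlsafe_b64encode(digest).decode('ascii').rstrip('=')
    return verifier, challenge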
@app.route('/userinfo', methods=['GET'])
def userinfo():
req = flask.request
log_request('GET-USERINFO', req)
    access_token = req.headers.get('Authorization', None)
    if not access_token:
        return flask.render_template('error.html', text='Missing authorization')
    # TODO: Validate access-token
    log.info("GET-USERINFO: Access token: '{}'".format(access_token))
    access_token_parts = access_token.split()
    if len(access_token_parts) != 2 or access_token_parts[0].lower() != 'bearer':
        return flask.render_template('error.html', text='Invalid authorization')
    access_token = access_token_parts[1]
    # Verify against our own public signing key (loaded at startup)
    access_token_json = jwt.decode(access_token, signing_key_pub)
scope = access_token_json['scope']
# TODO: Validate audience in access token covers /userinfo
log.info("GET-USERINFO: Access token audience: '{}'".format(access_token_json['aud']))
log.info("GET-USERINFO: Scope '{}'".format(scope))
claims = dict()
    # See https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims for what claims to include in the userinfo response
if 'profile' in scope:
        claims['name'] = 'Name of user {}'.format(access_token_json['sub'].capitalize())
return claims
@app.route('/endsession', methods=['GET', 'POST'])
def endsession():
req = flask.request
id_token_hint = req.values.get('id_token_hint')
redir_url = req.values.get('post_logout_redirect_uri')
# TODO: Validate id_token_hint was issued by us
id_token_claims = jwt.decode(id_token_hint, signing_key_pub)
if own_base_url not in id_token_claims['aud']:
log.error('END-SESSION: ID token hint not for us')
return flask.render_template('error.html', text='ID token not for us')
log.info('END-SESSION: ID token hint claims: {}'.format(id_token_claims))
existing_session_id = get_session_by_subject(id_token_claims['sub'])
if existing_session_id:
session = sessions[existing_session_id]
return flask.render_template('endsession.html', session_id=existing_session_id,
subject=session['subject'], redir_url=redir_url)
else:
return flask.render_template('error.html', text='Error logging out')
@app.route('/endsession-approve', methods=['GET', 'POST'])
def endsession_approve():
req = flask.request
session_id = req.form.get('sessionid')
redir_url = req.form.get('redirurl')
log.info('END-SESSION-APPROVE: Ending session: {}'.format(session_id))
    sessions.pop(session_id, None)  # tolerate stale/unknown session ids
resp = flask.make_response(flask.redirect(redir_url, code=303))
return resp
@app.route('/.well-known/jwks.json', methods=['GET'])
def jwks():
jwks = { 'keys': [ signing_key_pub.as_dict() ] }
return flask.Response(json.dumps(jwks), mimetype='application/json')
@app.route('/.well-known/openid-configuration', methods=['GET'])
def openid_configuration():
config = { 'issuer': own_base_url,
'authorization_endpoint': own_base_url+'/authorize',
'token_endpoint': own_base_url+'/token',
'userinfo_endpoint': own_base_url+'/userinfo',
'jwks_uri': own_base_url+'/.well-known/jwks.json',
'end_session_endpoint': own_base_url+'/endsession'}
return flask.Response(json.dumps(config), mimetype='application/json')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=app_port)
|
from showtimes.app import app
def test_cinema_1_showtimes_index():
client = app.test_client()
    reply = client.get("/")
    assert reply.status_code == 200
def test_cinema_1_booking_showtimes_showtimes():
client = app.test_client()
reply = client.get("/showtimes")
actual_reply = reply.json
assert len(actual_reply) == 6
def test_cinema_1_booking_showtimes_showtime():
client = app.test_client()
for date, expected in GOOD_RESPONSES.items():
reply = client.get("/showtimes/{}".format(date))
actual_reply = reply.json
assert len(actual_reply) == len(expected)
assert set(actual_reply) == set(expected)
def test_cinema_1_booking_showtimes_invalid_showtime():
client = app.test_client()
reply = client.get("/showtimes/{}".format("1"))
actual_reply = reply.json
    assert actual_reply is None
assert reply.status_code == 404
GOOD_RESPONSES = {
"20151130": [
"720d006c-3a57-4b6a-b18f-9b713b073f3c",
"a8034f44-aee4-44cf-b32c-74cf452aaaae",
"39ab85e5-5e8e-4dc5-afea-65dc368bd7ab"
],
"20151201": [
"267eedb8-0f5d-42d5-8f43-72426b9fb3e6",
"7daf7208-be4d-4944-a3ae-c1c2f516f3e6",
"39ab85e5-5e8e-4dc5-afea-65dc368bd7ab",
"a8034f44-aee4-44cf-b32c-74cf452aaaae"
],
"20151202": [
"a8034f44-aee4-44cf-b32c-74cf452aaaae",
"96798c08-d19b-4986-a05d-7da856efb697",
"39ab85e5-5e8e-4dc5-afea-65dc368bd7ab",
"276c79ec-a26a-40a6-b3d3-fb242a5947b6"
],
"20151203": [
"720d006c-3a57-4b6a-b18f-9b713b073f3c",
"39ab85e5-5e8e-4dc5-afea-65dc368bd7ab"
],
"20151204": [
"96798c08-d19b-4986-a05d-7da856efb697",
"a8034f44-aee4-44cf-b32c-74cf452aaaae",
"7daf7208-be4d-4944-a3ae-c1c2f516f3e6"
],
"20151205": [
"96798c08-d19b-4986-a05d-7da856efb697",
"a8034f44-aee4-44cf-b32c-74cf452aaaae",
"7daf7208-be4d-4944-a3ae-c1c2f516f3e6",
"276c79ec-a26a-40a6-b3d3-fb242a5947b6",
"39ab85e5-5e8e-4dc5-afea-65dc368bd7ab"
]
}
|
import pytest
from top2vec import Top2Vec
from sklearn.datasets import fetch_20newsgroups
import numpy as np
# get 20 newsgroups data
newsgroups_train = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))
newsgroups_documents = newsgroups_train.data[0:2000]
# train top2vec model without doc_ids provided
top2vec = Top2Vec(documents=newsgroups_documents, speed="fast-learn", workers=8)
# train top2vec model with doc_ids provided
doc_ids = [str(num) for num in range(0, len(newsgroups_documents))]
top2vec_docids = Top2Vec(documents=newsgroups_documents, document_ids=doc_ids, speed="fast-learn", workers=8)
# train top2vec model without saving documents
top2vec_no_docs = Top2Vec(documents=newsgroups_documents, keep_documents=False, speed="fast-learn", workers=8)
# train top2vec model with corpus_file
top2vec_corpus_file = Top2Vec(documents=newsgroups_documents, use_corpus_file=True, speed="fast-learn", workers=8)
# test USE
top2vec_use = Top2Vec(documents=newsgroups_documents, embedding_model='universal-sentence-encoder')
# test USE-multilang
top2vec_use_multilang = Top2Vec(documents=newsgroups_documents,
embedding_model='universal-sentence-encoder-multilingual')
# test multilingual sentence-transformer
top2vec_transformer_multilang = Top2Vec(documents=newsgroups_documents,
embedding_model='distiluse-base-multilingual-cased')
models = [top2vec, top2vec_docids, top2vec_no_docs, top2vec_corpus_file,
top2vec_use, top2vec_use_multilang, top2vec_transformer_multilang]
def get_model_vocab(top2vec_model):
if top2vec_model.embedding_model == 'doc2vec':
return list(top2vec_model.model.wv.vocab.keys())
else:
return top2vec_model.vocab
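# A small additional sanity check (our addition): every trained model should
# expose a non-empty vocabulary through the helper above.
@pytest.mark.parametrize('top2vec_model', models)
def test_vocab_non_empty(top2vec_model):
    vocab = get_model_vocab(top2vec_model)
    assert len(vocab) > 0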
@pytest.mark.parametrize('top2vec_model', models)
def test_add_documents_original(top2vec_model):
num_docs = top2vec_model._get_document_vectors().shape[0]
docs_to_add = newsgroups_train.data[0:100]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
if top2vec_model.document_ids is None:
top2vec_model.add_documents(docs_to_add)
else:
doc_ids_new = [str(num) for num in range(2000, 2000 + len(docs_to_add))]
top2vec_model.add_documents(docs_to_add, doc_ids_new)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == num_docs + len(docs_to_add) \
== num_docs_new == len(top2vec_model.doc_top)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_hierarchical_topic_reduction(top2vec_model):
num_topics = top2vec_model.get_num_topics()
    if num_topics > 10:
        reduced_num = 10
    elif num_topics - 1 > 0:
        reduced_num = num_topics - 1
    else:
        pytest.skip('model has a single topic; nothing to reduce')
hierarchy = top2vec_model.hierarchical_topic_reduction(reduced_num)
assert len(hierarchy) == reduced_num == len(top2vec_model.topic_vectors_reduced)
@pytest.mark.parametrize('top2vec_model', models)
def test_add_documents_post_reduce(top2vec_model):
docs_to_add = newsgroups_train.data[500:600]
num_docs = top2vec_model._get_document_vectors().shape[0]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
if top2vec_model.document_ids is None:
top2vec_model.add_documents(docs_to_add)
else:
doc_ids_new = [str(num) for num in range(2100, 2100 + len(docs_to_add))]
top2vec_model.add_documents(docs_to_add, doc_ids_new)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum + len(docs_to_add) == topic_count_sum_new == topic_count_reduced_sum + len(docs_to_add) \
== topic_count_reduced_sum_new == num_docs + len(docs_to_add) == num_docs_new == len(top2vec_model.doc_top) \
== len(top2vec_model.doc_top_reduced)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_delete_documents(top2vec_model):
doc_ids_to_delete = list(range(500, 550))
num_docs = top2vec_model._get_document_vectors().shape[0]
topic_count_sum = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
if top2vec_model.document_ids is None:
top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)
else:
doc_ids_to_delete = [str(doc_id) for doc_id in doc_ids_to_delete]
top2vec_model.delete_documents(doc_ids=doc_ids_to_delete)
topic_count_sum_new = sum(top2vec_model.get_topic_sizes()[0])
topic_count_reduced_sum_new = sum(top2vec_model.get_topic_sizes(reduced=True)[0])
num_docs_new = top2vec_model._get_document_vectors().shape[0]
assert topic_count_sum - len(doc_ids_to_delete) == topic_count_sum_new == topic_count_reduced_sum - \
len(doc_ids_to_delete) == topic_count_reduced_sum_new == num_docs - len(doc_ids_to_delete) \
== num_docs_new == len(top2vec_model.doc_top) == len(top2vec_model.doc_top_reduced)
if top2vec_model.documents is not None:
assert num_docs_new == len(top2vec_model.documents)
@pytest.mark.parametrize('top2vec_model', models)
def test_get_topic_hierarchy(top2vec_model):
hierarchy = top2vec_model.get_topic_hierarchy()
assert len(hierarchy) == len(top2vec_model.topic_vectors_reduced)
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_num_topics(top2vec_model, reduced):
# check that there are more than 0 topics
assert top2vec_model.get_num_topics(reduced=reduced) > 0
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_topics(top2vec_model, reduced):
num_topics = top2vec_model.get_num_topics(reduced=reduced)
words, word_scores, topic_nums = top2vec_model.get_topics(reduced=reduced)
# check that for each topic there are words, word_scores and topic_nums
assert len(words) == len(word_scores) == len(topic_nums) == num_topics
# check that for each word there is a score
assert len(words[0]) == len(word_scores[0])
# check that topics words are returned in decreasing order
topic_words_scores = word_scores[0]
assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_get_topic_size(top2vec_model, reduced):
topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)
# check that topic sizes add up to number of documents
assert sum(topic_sizes) == top2vec_model._get_document_vectors().shape[0]
# check that topics are ordered decreasingly
assert all(topic_sizes[i] >= topic_sizes[i + 1] for i in range(len(topic_sizes) - 1))
# @pytest.mark.parametrize('top2vec_model', models)
# @pytest.mark.parametrize('reduced', [False, True])
# def test_generate_topic_wordcloud(top2vec_model, reduced):
# # generate word cloud
# num_topics = top2vec_model.get_num_topics(reduced=reduced)
# top2vec_model.generate_topic_wordcloud(num_topics - 1, reduced=reduced)
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_search_documents_by_topic(top2vec_model, reduced):
# get topic sizes
topic_sizes, topic_nums = top2vec_model.get_topic_sizes(reduced=reduced)
topic = topic_nums[0]
num_docs = topic_sizes[0]
# search documents by topic
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs,
reduced=reduced)
else:
document_scores, document_ids = top2vec_model.search_documents_by_topic(topic, num_docs, reduced=reduced)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
# check that all documents returned are most similar to topic being searched
if top2vec_model.document_ids is not None:
document_indexes = [top2vec_model.doc_id2index[doc_id] for doc_id in document_ids]
else:
document_indexes = document_ids
if reduced:
doc_topics = set(np.argmax(
np.inner(top2vec_model._get_document_vectors()[document_indexes],
top2vec_model.topic_vectors_reduced), axis=1))
else:
doc_topics = set(np.argmax(
np.inner(top2vec_model._get_document_vectors()[document_indexes],
top2vec_model.topic_vectors), axis=1))
assert len(doc_topics) == 1 and topic in doc_topics
@pytest.mark.parametrize('top2vec_model', models)
def test_search_documents_by_keywords(top2vec_model):
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
num_docs = 10
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],
num_docs=num_docs)
else:
document_scores, document_ids = top2vec_model.search_documents_by_keywords(keywords=[keyword],
num_docs=num_docs)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_similar_words(top2vec_model):
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
num_words = 20
words, word_scores = top2vec_model.similar_words(keywords=[keyword], num_words=num_words)
# check that there is a score for each word
assert len(words) == len(word_scores) == num_words
# check that words are returned in decreasing order
assert all(word_scores[i] >= word_scores[i + 1] for i in range(len(word_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
@pytest.mark.parametrize('reduced', [False, True])
def test_search_topics(top2vec_model, reduced):
num_topics = top2vec_model.get_num_topics(reduced=reduced)
keywords = get_model_vocab(top2vec_model)
keyword = keywords[-1]
topic_words, word_scores, topic_scores, topic_nums = top2vec_model.search_topics(keywords=[keyword],
num_topics=num_topics,
reduced=reduced)
# check that for each topic there are topic words, word scores, topic scores and score of topic
assert len(topic_words) == len(word_scores) == len(topic_scores) == len(topic_nums) == num_topics
# check that for each topic words have scores
assert len(topic_words[0]) == len(word_scores[0])
# check that topics are returned in decreasing order
assert all(topic_scores[i] >= topic_scores[i + 1] for i in range(len(topic_scores) - 1))
# check that topics words are returned in decreasing order
topic_words_scores = word_scores[0]
assert all(topic_words_scores[i] >= topic_words_scores[i + 1] for i in range(len(topic_words_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_search_document_by_documents(top2vec_model):
if top2vec_model.document_ids is not None:
doc_id = top2vec_model.document_ids[0]
else:
doc_id = 0
num_docs = 10
if top2vec_model.documents is not None:
documents, document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],
num_docs=num_docs)
else:
document_scores, document_ids = top2vec_model.search_documents_by_documents(doc_ids=[doc_id],
num_docs=num_docs)
# check that for each document there is a score and number
if top2vec_model.documents is not None:
assert len(documents) == len(document_scores) == len(document_ids) == num_docs
else:
assert len(document_scores) == len(document_ids) == num_docs
# check that documents are returned in decreasing order
assert all(document_scores[i] >= document_scores[i + 1] for i in range(len(document_scores) - 1))
@pytest.mark.parametrize('top2vec_model', models)
def test_get_documents_topics(top2vec_model):
if top2vec_model.document_ids is not None:
doc_ids_get = top2vec_model.document_ids[[0, 5]]
else:
doc_ids_get = [0, 5]
if top2vec_model.hierarchy is not None:
doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get,
reduced=True)
else:
doc_topics, doc_dist, topic_words, topic_word_scores = top2vec_model.get_documents_topics(doc_ids=doc_ids_get)
assert len(doc_topics) == len(doc_dist) == len(topic_words) == len(topic_word_scores) == len(doc_ids_get)
|
from excel2py.base_proforma_calc import BaseProformaCalc
import unittest
class Sub(BaseProformaCalc):
inputs = {'x'}
class TestBaseProformaCalc(unittest.TestCase):
def test_good(self):
s = Sub()
s.calculate(x=4)
def test_missing(self):
s = Sub()
with self.assertRaises(TypeError):
s.calculate()
def test_extra(self):
s = Sub()
with self.assertRaises(TypeError):
s.calculate(x=5, y=3, z=4)
if __name__ == "__main__":
unittest.main()
|
import json
import logging
import time
import datetime as dt
import calendar
import numpy as np
from six.moves import cPickle as pickle
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
log = logging.getLogger(__name__)
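# 1e6 nanoseconds per millisecond; used below to convert ns-resolution
# timestamps to milliseconds.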
millifactor = 10 ** 6.
class BokehJSONEncoder(json.JSONEncoder):
def transform_series(self, obj):
"""transform series
"""
vals = obj.values
return self.transform_array(vals)
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
def transform_array(self, obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if self.legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / millifactor).tolist()
# else punt.
else:
return obj.astype('datetime64[ms]').astype('int64').tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return self.transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(self, obj):
"""handles nans/inf conversion
"""
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
def transform_python_types(self, obj):
"""handle special scalars, default to default json encoder
"""
# Pandas Timestamp
        if is_pandas and isinstance(obj, pd.Timestamp):
            return obj.value / millifactor  # nanosecond to millisecond
        elif np.issubdtype(type(obj), np.floating):
            return float(obj)
        elif np.issubdtype(type(obj), np.integer):
            return int(obj)
# Datetime, Date
elif isinstance(obj, (dt.datetime, dt.date)):
return calendar.timegm(obj.timetuple()) * 1000.
# Numpy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
return (epoch_delta / np.timedelta64(1, 'ms'))
# Time
elif isinstance(obj, dt.time):
return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond / 1000.
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from .plot_object import PlotObject
from .properties import HasProps
from .colors import Color
## array types
if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
return self.transform_series(obj)
elif isinstance(obj, np.ndarray):
return self.transform_array(obj)
elif isinstance(obj, PlotObject):
return obj.ref
elif isinstance(obj, HasProps):
return obj.changed_properties_with_values()
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
rslt = json.dumps(obj, cls=encoder, **kwargs)
return rslt
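# Illustrative example (our addition): NaN/inf values survive as strings, e.g.
#   serialize_json({'x': np.array([1.0, float('nan')])})
#   returns '{"x": [1.0, "NaN"]}'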
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
return {'msgtype': 'status',
'status': status}
def error_obj(error_msg):
return {
'msgtype': 'error',
'error_msg': error_msg}
|
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppInfo tools.
This library allows you to work with AppInfo records in memory, as well as store
and load from configuration files.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
# Parts of the code in this file are duplicated in
# //java/com/google/apphosting/admin/legacy/...
# This is part of an ongoing effort to replace the deployment API.
# Until we can delete this code, please check to see if your changes need
# to be reflected in the java code. For questions, talk to clouser@ or
from __future__ import absolute_import
import logging
import os
import re
import string
import sys
import wsgiref.util
# pylint: disable=g-import-not-at-top
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
# This case covers both Python 2.5 and unittests, which are 2.5 only.
from googlecloudsdk.third_party.appengine.api import validation
from googlecloudsdk.third_party.appengine.api import yaml_builder
from googlecloudsdk.third_party.appengine.api import yaml_listener
from googlecloudsdk.third_party.appengine.api import yaml_object
from googlecloudsdk.third_party.appengine.api import appinfo_errors
from googlecloudsdk.third_party.appengine.api import backendinfo
from googlecloudsdk.third_party.appengine._internal import six_subset
# pylint: enable=g-import-not-at-top
# Regular expression for matching URL, file, URL root regular expressions.
# `url_root` is identical to url except it additionally imposes not ending with
# *.
# TODO(user): `url_root` should generally allow a URL but not a regex or
# glob.
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
# Regular expression for matching cache expiration deltas.
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_NON_WHITE_SPACE_REGEX = r'^\S+$'
# Regular expression for matching service names.
# TODO(user): this may need altering so as to not leak unreleased service names
# TODO(user): Re-add sms to list of services.
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
# Regular expression for matching page names.
_PAGE_NAME_REGEX = r'^.+$'
# Constants for interpreting expiration deltas.
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
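# For example, an expiration of '4d 5h' converts to
# 4 * 86400 + 5 * 3600 = 363600 seconds.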
# Constant values from `apphosting/base/constants.h`
# TODO(user): Maybe a python constants file.
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
# See b/5485871 for why this is 100 and not 63.
# NOTE(user): See b/5485871 for why this is different from the
# `apphosting/base/constants.h` value.
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
# The character separating the partition from the domain.
PARTITION_SEPARATOR = '~'
# The character separating the domain from the display-app-id.
DOMAIN_SEPARATOR = ':'
# The character separating major and minor versions.
VERSION_SEPARATOR = '.'
# The character separating module from module version.
MODULE_SEPARATOR = ':'
# The name of the default module
DEFAULT_MODULE = 'default'
# Regular expression for ID types. Defined in apphosting/base/id_util.cc.
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
# NOTE(user,user): These regexes have been copied to multiple other
# locations in google.apphosting so we don't have to pull this file into
# python_lib for other modules to work in production.
# Other known locations as of 2016-08-15:
# - java/com/google/apphosting/admin/legacy/LegacyAppInfo.java
# - apphosting/client/app_config_old.cc
# - apphosting/api/app_config/app_config_server2.cc
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
# Note that this regex will not allow zero-prefixed numbers, e.g. 0001.
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
# This enforces that we will only accept a single decimal point of accuracy at
# the granularity of seconds and no decimal point with a granularity of
# milliseconds.
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
GCE_RESOURCE_NAME_REGEX = r'^[a-z]([a-z\d-]{0,61}[a-z\d])?$'
VPC_ACCESS_CONNECTOR_NAME_REGEX = r'^[a-z\d-]+(/[a-z\d-]+)*$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
# Note(user): This must match api/app_config.py
BUILTIN_NAME_PREFIX = 'ah-builtin'
# Here we expect either normal runtimes (such as 'nodejs' or 'java') or
# pinned runtime builders, which take the form of the path to a cloudbuild.yaml
# manifest file in GCS (written as gs://bucket/path/to/build.yaml).
RUNTIME_RE_STRING = r'((gs://[a-z0-9\-\._/]+)|([a-z][a-z0-9\-\.]{0,29}))'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
ENV_RE_STRING = r'(1|2|standard|flex|flexible)'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
# Used for missing values; see http://b/issue?id=2073962.
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
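# By default this skips emacs autosaves ('#foo#'), editor backups ('foo~'),
# compiled Python files ('foo.pyc'/'foo.pyo'), RCS directories, and dotfiles.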
# Expression meaning to skip no files, which is the default for AppInclude.
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
# Attributes for `URLMap`
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
# Attributes for `AppInfoExternal`
APPLICATION = 'application'
PROJECT = 'project' # An alias for 'application'
MODULE = 'module'
SERVICE = 'service'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
ZONES = 'zones'
BETA_SETTINGS = 'beta_settings'
VM_HEALTH_CHECK = 'vm_health_check'
HEALTH_CHECK = 'health_check'
RESOURCES = 'resources'
LIVENESS_CHECK = 'liveness_check'
READINESS_CHECK = 'readiness_check'
NETWORK = 'network'
VPC_ACCESS_CONNECTOR = 'vpc_access_connector'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
RUNTIME_CHANNEL = 'runtime_channel'
API_VERSION = 'api_version'
ENDPOINTS_API_SERVICE = 'endpoints_api_service'
ENV = 'env'
ENTRYPOINT = 'entrypoint'
RUNTIME_CONFIG = 'runtime_config'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
STANDARD_WEBSOCKET = 'standard_websocket'
SOURCE_REPO_RE_STRING = r'^[a-z][a-z0-9\-\+\.]*:[^#]*$'
SOURCE_REVISION_RE_STRING = r'^[0-9a-fA-F]+$'
# Maximum size of all source references (in bytes) for a deployment.
SOURCE_REFERENCES_MAX_SIZE = 2048
INSTANCE_CLASS = 'instance_class'
# Attributes for Standard App Engine (only) AutomaticScaling.
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
# Attributes for Managed VMs (only) AutomaticScaling. These are very
# different from Standard App Engine because scaling settings are
# mapped to Cloud Autoscaler (as opposed to the clone scheduler). See
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
# Managed VMs Richer Autoscaling. These (MVMs only) scaling settings
# are supported for both vm:true and env:2|flex, but are not yet
# publicly documented.
TARGET_NETWORK_SENT_BYTES_PER_SEC = 'target_network_sent_bytes_per_sec'
TARGET_NETWORK_SENT_PACKETS_PER_SEC = 'target_network_sent_packets_per_sec'
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC = 'target_network_received_bytes_per_sec'
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC = (
'target_network_received_packets_per_sec')
TARGET_DISK_WRITE_BYTES_PER_SEC = 'target_disk_write_bytes_per_sec'
TARGET_DISK_WRITE_OPS_PER_SEC = 'target_disk_write_ops_per_sec'
TARGET_DISK_READ_BYTES_PER_SEC = 'target_disk_read_bytes_per_sec'
TARGET_DISK_READ_OPS_PER_SEC = 'target_disk_read_ops_per_sec'
TARGET_REQUEST_COUNT_PER_SEC = 'target_request_count_per_sec'
TARGET_CONCURRENT_REQUESTS = 'target_concurrent_requests'
# Custom Metric autoscaling. These are supported for Flex only.
CUSTOM_METRICS = 'custom_metrics'
METRIC_NAME = 'metric_name'
TARGET_TYPE = 'target_type'
TARGET_TYPE_REGEX = r'^(GAUGE|DELTA_PER_SECOND|DELTA_PER_MINUTE)$'
CUSTOM_METRIC_UTILIZATION = 'target_utilization'
SINGLE_INSTANCE_ASSIGNMENT = 'single_instance_assignment'
FILTER = 'filter'
# Attributes for ManualScaling
INSTANCES = 'instances'
# Attributes for BasicScaling
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
# Attributes for AdminConsole
PAGES = 'pages'
NAME = 'name'
# Attributes for EndpointsApiService
ENDPOINTS_NAME = 'name'
CONFIG_ID = 'config_id'
ROLLOUT_STRATEGY = 'rollout_strategy'
ROLLOUT_STRATEGY_FIXED = 'fixed'
ROLLOUT_STRATEGY_MANAGED = 'managed'
TRACE_SAMPLING = 'trace_sampling'
# Attributes for ErrorHandlers
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
# Attributes for BuiltinHandler
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
# Attributes for `VmHealthCheck`. Please refer to message `VmHealthCheck` in
# `request_path` and `port` are not configurable yet.
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
APP_START_TIMEOUT_SEC = 'app_start_timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
FAILURE_THRESHOLD = 'failure_threshold'
SUCCESS_THRESHOLD = 'success_threshold'
RESTART_THRESHOLD = 'restart_threshold'
INITIAL_DELAY_SEC = 'initial_delay_sec'
HOST = 'host'
PATH = 'path'
# Attributes for Resources.
CPU = 'cpu'
MEMORY_GB = 'memory_gb'
DISK_SIZE_GB = 'disk_size_gb'
# Attributes for Resources:Volumes.
VOLUMES = 'volumes'
VOLUME_NAME = 'name'
VOLUME_TYPE = 'volume_type'
SIZE_GB = 'size_gb'
# Attributes for Network.
FORWARDED_PORTS = 'forwarded_ports'
INSTANCE_TAG = 'instance_tag'
NETWORK_NAME = 'name'
SUBNETWORK_NAME = 'subnetwork_name'
SESSION_AFFINITY = 'session_affinity'
# Attributes for Scheduler Settings
STANDARD_MIN_INSTANCES = 'min_instances'
STANDARD_MAX_INSTANCES = 'max_instances'
STANDARD_TARGET_CPU_UTILIZATION = 'target_cpu_utilization'
STANDARD_TARGET_THROUGHPUT_UTILIZATION = 'target_throughput_utilization'
# Attributes for `VpcAccessConnector`.
VPC_ACCESS_CONNECTOR_NAME = 'name'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
latest_version,
default_version=None,
deprecated_versions=None,
experimental_versions=None,
hidden_versions=None):
"""Initializer for `_VersionedLibrary`.
Args:
name: The name of the library; for example, `django`.
url: The URL for the library's project page; for example,
`http://www.djangoproject.com/`.
description: A short description of the library; for example,
`A framework...`.
supported_versions: A list of supported version names, ordered by release
date; for example, `["v1", "v2", "v3"]`.
latest_version: The version of the library that will be used when you
specify `latest.` The rule of thumb is that this value should be the
newest version that is neither deprecated nor experimental; however
this value might be an experimental version if all of the supported
versions are either deprecated or experimental.
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime, or `None` if the library is not available
by default; for example, `v1`.
deprecated_versions: A list of the versions of the library that have been
deprecated; for example, `["v1", "v2"]`. Order by release version.
experimental_versions: A list of the versions of the library that are
currently experimental; for example, `["v1"]`. Order by release
version.
hidden_versions: A list of versions that will not show up in public
documentation for release purposes. If, as a result, the library
has no publicly documented versions, the entire library won't show
up in the docs. Order by release version.
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.latest_version = latest_version
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
self.hidden_versions = hidden_versions or []
@property
def hidden(self):
"""Determines if the entire library should be hidden from public docs.
Returns:
      True if every supported version is hidden.
"""
return sorted(self.supported_versions) == sorted(self.hidden_versions)
@property
def non_deprecated_versions(self):
"""Retrieves the versions of the library that are not deprecated.
Returns:
A list of the versions of the library that are not deprecated.
"""
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'clearsilver',
'http://www.clearsilver.net/',
'A fast, powerful, and language-neutral HTML template system.',
['0.10.5'],
latest_version='0.10.5',
hidden_versions=['0.10.5'],
),
_VersionedLibrary(
'click',
'http://click.pocoo.org/',
'A command line library for Python.',
['6.6'],
latest_version='6.6',
hidden_versions=['6.6'],
),
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5', '1.9', '1.11'],
latest_version='1.4',
deprecated_versions=['1.2', '1.3', '1.5', '1.9'],
# TODO(b/78247136) Deprecate 1.4 and update latest_version to 1.11
),
_VersionedLibrary(
'enum',
'https://pypi.python.org/pypi/enum34',
'A backport of the enum module introduced in python 3.4',
['0.9.23'],
latest_version='0.9.23',
),
_VersionedLibrary(
'endpoints',
'https://cloud.google.com/appengine/docs/standard/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0'],
latest_version='1.0',
),
_VersionedLibrary(
'flask',
'http://flask.pocoo.org/',
'Flask is a microframework for Python based on Werkzeug, Jinja 2 '
'and good intentions.',
['0.12'],
latest_version='0.12',
),
_VersionedLibrary(
'futures',
'https://docs.python.org/3/library/concurrent.futures.html',
'Backport of Python 3.2 Futures.',
['3.0.5'],
latest_version='3.0.5',
),
_VersionedLibrary(
'grpcio',
'http://www.grpc.io/',
'A high performance general RPC framework',
['1.0.0'],
latest_version='1.0.0',
experimental_versions=['1.0.0'],
),
_VersionedLibrary(
'itsdangerous',
'http://pythonhosted.org/itsdangerous/',
'HMAC and SHA1 signing for Python.',
['0.24'],
latest_version='0.24',
hidden_versions=['0.24'],
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5', '3.7.3'],
latest_version='3.7.3',
deprecated_versions=['2.3', '2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15', '0.23'],
latest_version='0.15',
),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
latest_version='1.2.0',
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4', '1.2.4', '1.2.5'],
latest_version='1.2.5',
deprecated_versions=['1.2.4b4', '1.2.4'],
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1'],
latest_version='1.6.1',
),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7'],
latest_version='1.1.7',
),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
latest_version='1.0',
default_version='1.0',
),
_VersionedLibrary(
'pytz',
'https://pypi.python.org/pypi/pytz?',
'A library for cross-platform timezone calculations',
['2016.4', '2017.2', '2017.3'],
latest_version='2017.3',
default_version='2017.3',
deprecated_versions=['2016.4', '2017.2'],
),
_VersionedLibrary(
'crcmod',
'http://crcmod.sourceforge.net/',
'A library for generating Cyclic Redundancy Checks (CRC).',
['1.7'],
latest_version='1.7',
),
_VersionedLibrary(
'protobuf',
'https://developers.google.com/protocol-buffers/',
'A library for serializing structured data',
['3.0.0'],
latest_version='3.0.0',
experimental_versions=['3.0.0'],
),
_VersionedLibrary(
'PyAMF',
'https://pypi.python.org/pypi/PyAMF',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1', '0.7.2'],
latest_version='0.6.1',
experimental_versions=['0.7.2'],
),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptography functions such as random number generation.',
['2.3', '2.6', '2.6.1'],
latest_version='2.6',
deprecated_versions=['2.3'],
# TODO(b/78247136) Deprecate 2.6 and update latest_version to 2.6.1
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11', '36.6.0'],
latest_version='36.6.0',
deprecated_versions=['0.6c11'],
),
_VersionedLibrary(
'six',
'https://pypi.python.org/pypi/six',
'Abstract differences between py2.x and py3',
['1.9.0'],
latest_version='1.9.0',
),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7', '2.7.11'],
latest_version='2.7.11',
deprecated_versions=['2.7']
),
_VersionedLibrary(
'ujson',
'https://pypi.python.org/pypi/ujson',
'UltraJSON is an ultra fast JSON encoder and decoder written in pure C',
['1.35'],
latest_version='1.35',
),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
latest_version='2.5.2',
# Keep default version at 2.3 because apps in production depend on it.
default_version='2.3',
deprecated_versions=['2.5.1']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
latest_version='1.2.3',
# Keep default version at 1.1.1 because apps in production depend on it.
default_version='1.1.1',
),
_VersionedLibrary(
'werkzeug',
'http://www.werkzeug.pocoo.org/',
'A WSGI utility library.',
['0.11.10'],
latest_version='0.11.10',
default_version='0.11.10',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
latest_version='3.10',
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
# A mapping from third-party name/version to a list of that library's
# dependencies.
REQUIRED_LIBRARIES = {
('django', '1.11'): [('pytz', '2017.2')],
('flask', '0.12'): [('click', '6.6'), ('itsdangerous', '0.24'),
('jinja2', '2.6'), ('werkzeug', '0.11.10')],
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
('protobuf', '3.0.0'): [('six', '1.9.0')],
('protobuf', 'latest'): [('six', 'latest')],
('grpcio', '1.0.0'): [('protobuf', '3.0.0'), ('enum', '0.9.23'),
('futures', '3.0.5'), ('six', '1.9.0'),
('setuptools', '36.6.0')],
('grpcio', 'latest'): [('protobuf', 'latest'), ('enum', 'latest'),
('futures', 'latest'), ('six', 'latest'),
('setuptools', 'latest')]
}
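# For example, per the mapping above, enabling flask 0.12 implicitly pulls in
# click 6.6, itsdangerous 0.24, jinja2 2.6 and werkzeug 0.11.10.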
_USE_VERSION_FORMAT = ('use one of: "%s"')
# See RFC 2616 section 2.2.
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
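# i.e. a token is one or more printable ASCII characters that are not
# separators; header names like 'X-Foo-Header' match this.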
# Source: http://www.cs.tut.fi/~jkorpela/http.html
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
# The minimum cookie length (i.e. number of bytes) that HTTP clients should
# support, per RFCs 2109 and 2965.
_MAX_COOKIE_LENGTH = 4096
# The maximum URL length. The limit leaves room for a
# trailing NULL character, which is why this is not 2048.
_MAX_URL_LENGTH = 2047
# We allow certain headers to be larger than the normal limit of 8192 bytes.
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS = 10240
_CANNED_RUNTIMES = ('contrib-dart', 'dart', 'go', 'php', 'php55', 'php72',
'python', 'python27', 'python-compat', 'java', 'java7',
'java8', 'vm', 'custom', 'nodejs', 'ruby')
_all_runtimes = _CANNED_RUNTIMES
def GetAllRuntimes():
"""Returns the list of all valid runtimes.
This list can include third-party runtimes as well as canned runtimes.
Returns:
Tuple of strings.
"""
return _all_runtimes
def EnsureAsciiBytes(s, err):
"""Ensure s contains only ASCII-safe characters; return it as bytes-type.
Arguments:
s: the string or bytes to check
err: the error to raise if not good.
Raises:
err if it's not ASCII-safe.
Returns:
s as a byte string
"""
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise err
except UnicodeDecodeError:
# Python 2 hilariously raises UnicodeDecodeError on trying to
# ascii-_en_code a byte string invalidly.
raise err
except AttributeError:
try:
return s.decode('ascii').encode('ascii')
except UnicodeDecodeError:
raise err
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
# Common fields.
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
# Python/CGI fields.
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values to what `http_headers` allows.
`http_headers` is an static handler key; it applies to handlers with
`static_dir` or `static_files` keys. The following code is an example of how
`http_headers` is used::
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
# TODO(user): I don't think there's any reason to disallow users
# from setting Content-Encoding, but other parts of the system prevent
# this; therefore, we disallow it here. See the following discussion:
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-webkit-csp': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'content-security-policy-report-only':
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in `HttpHeadersDict` are valid.
`HttpHeadersDict` contains a list of headers. An instance is used as
`HttpHeadersDict`'s `KEY_VALIDATOR`.
"""
def Validate(self, name, unused_key=None):
"""Returns an argument, or raises an exception if the argument is invalid.
HTTP header names are defined by `RFC 2616, section 4.2`_.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: An argument cannot be used as an
HTTP header name.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
original_name = name
# Make sure only ASCII data is used.
if isinstance(name, six_subset.string_types):
name = EnsureAsciiBytes(name, appinfo_errors.InvalidHttpHeaderName(
'HTTP header values must not contain non-ASCII data'))
# HTTP headers are case-insensitive.
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
# Request headers shouldn't be used in responses.
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
# Make sure that none of the reserved prefixes is used.
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
            'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in `HttpHeadersDict` are valid.
An instance is used as `HttpHeadersDict`'s `VALUE_VALIDATOR`.
"""
def Validate(self, value, key=None):
"""Returns a value, or raises an exception if the value is invalid.
According to `RFC 2616 section 4.2`_ header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string"::
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
A value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: An argument cannot be used as an
HTTP header value.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
# Make sure only ASCII data is used.
if isinstance(value, six_subset.string_types):
value = EnsureAsciiBytes(value, appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data'))
b_value = value
else:
b_value = ('%s' % value).encode('ascii')
# HTTP headers are case-insensitive.
key = key.lower()
# TODO(user): This is the same check that appserver performs, but it
# could be stronger. e.g. `"foo` should not be considered valid, because
# HTTP does not allow unclosed double quote marks in header values, per
# RFC 2616 section 4.2.
printable = set(string.printable[:-5].encode('ascii'))
if not all(b in printable for b in b_value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
# The `>=` operator here is a little counter-intuitive. The reason for it
# is that I'm trying to follow the
# `HTTPProto::IsValidHeader` implementation.
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
# If execution reaches this point, it generally means the header is too
# long, but there are a few exceptions, which are listed in the next
# dict.
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
# We are dealing with one of the exceptional headers with larger maximum
# value lengths.
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
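  # Worked example for the length check above: a header written as
  # 'x-foo: <value>\r\n' has checked length len(name) + len(value) + 4. With a
  # 490-character value, 'x-foo' gives 5 + 490 + 4 = 499 < 500 and passes; at
  # 491 characters the total reaches 500 and, because 'x-foo' has no entry in
  # MAX_HEADER_VALUE_LENGTHS, InvalidHttpHeaderValue is raised. Exempted
  # headers such as 'set-cookie' are instead limited by their per-header
  # maximum (_MAX_COOKIE_LENGTH).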
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to `header_name`. If more than one such
value is in `self`, one of the values is selected arbitrarily and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
# TODO(user): Perhaps, this functionality should be part of
# `validation.ValidatedDict`.
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
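# Illustrative usage sketch (not part of the original module). It assumes that
# validation.ValidatedDict applies KEY_VALIDATOR/VALUE_VALIDATOR on assignment,
# which is how HttpHeadersDict is wired up above:
#
#   headers = HttpHeadersDict()
#   headers['X-Foo-Header'] = 'foo value'
#   headers.Get('x-foo-header')           # -> 'foo value'; lookup ignores case
#   headers['X-AppEngine-Custom'] = 'x'   # raises InvalidHttpHeaderName
#   headers['Content-Encoding'] = 'gzip'  # raises InvalidHttpHeaderName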
class URLMap(HandlerBase):
r"""Maps from URLs to handlers.
  This class acts similarly to a union type. Its purpose is to describe a mapping
between a set of URLs and their handlers. The handler type of a given instance
is determined by which `handler-id` attribute is used.
Every mapping can have one and only one handler type. Attempting to use more
than one `handler-id` attribute will cause an `UnknownHandlerType` to be
raised during validation. Failure to provide any `handler-id` attributes will
cause `MissingHandlerType` to be raised during validation.
The regular expression used by the `url` field will be used to match against
the entire URL path and query string of the request; therefore, partial maps
will not be matched. Specifying a `url`, such as `/admin`, is the same as
matching against the regular expression `^/admin$`. Don't start your matching
  `url` with `^` or end it with `$`. These regular expressions won't be
accepted and will raise `ValueError`.
Attributes:
login: Specifies whether a user should be logged in to access a URL.
The default value of this argument is `optional`.
secure: Sets the restriction on the protocol that can be used to serve this
URL or handler. This value can be set to `HTTP`, `HTTPS` or `either`.
url: Specifies a regular expression that is used to fully match against the
request URLs path. See the "Special cases" section of this document to
learn more.
static_files: Specifies the handler ID attribute that maps `url` to the
appropriate file. You can specify regular expression backreferences to
the string matched to `url`.
upload: Specifies the regular expression that is used by the application
configuration program to determine which files are uploaded as blobs.
Because it is difficult to determine this information using just the
`url` and `static_files` arguments, this attribute must be included.
This attribute is required when you define a `static_files` mapping. A
matching file name must fully match against the `upload` regular
expression, similar to how `url` is matched against the request path. Do
not begin the `upload` argument with the `^` character or end it with
the `$` character.
static_dir: Specifies the handler ID that maps the provided `url` to a
sub-directory within the application directory. See "Special cases."
mime_type: When used with `static_files` and `static_dir`, this argument
specifies that the MIME type of the files that are served from those
directories must be overridden with this value.
script: Specifies the handler ID that maps URLs to a script handler within
the application directory that will run using CGI.
position: Used in `AppInclude` objects to specify whether a handler should
be inserted at the beginning of the primary handler list or at the end.
If `tail` is specified, the handler is inserted at the end; otherwise,
the handler is inserted at the beginning. This behavior implies that
`head` is the effective default.
expiration: When used with static files and directories, this argument
specifies the time delta to use for cache expiration. This argument
should use the following format: `4d 5h 30m 15s`, where each letter
signifies days, hours, minutes, and seconds, respectively. The `s` for
"seconds" can be omitted. Only one amount must be specified, though
combining multiple amounts is optional. The following list contains
examples of values that are acceptable: `10`, `1d 6h`, `1h 30m`,
`7d 7d 7d`, `5m 30`.
api_endpoint: Specifies the handler ID that identifies an endpoint as an API
endpoint. Calls that terminate here will be handled by the API serving
framework.
Special cases:
When defining a `static_dir` handler, do not use a regular expression in the
`url` attribute. Both the `url` and `static_dir` attributes are
automatically mapped to these equivalents::
<url>/(.*)
<static_dir>/\1
For example, this declaration...::
url: /images
static_dir: images_folder
...is equivalent to this `static_files` declaration::
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
# Static file fields.
# File mappings are allowed to have regex back references.
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
# Static directory fields.
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
# Used in both static mappings.
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
# Python/CGI fields.
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
# The keys of this map are attributes which can be used to identify each
# mapping type in addition to the handler identifying attribute itself.
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
      HANDLER_SCRIPT: (POSITION,),  # One-element tuple, not a bare string.
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Gets the handler for a mapping.
Returns:
The value of the handler, as determined by the handler ID attribute.
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Gets the handler type of a mapping.
Returns:
The handler type as determined by which handler ID attribute is set.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
MissingHandlerAttribute: If a URL handler is missing an attribute.
"""
# Special case for the `api_endpoint` handler as it may have a `script`
# attribute as well.
if getattr(self, HANDLER_API_ENDPOINT) is not None:
# Matched id attribute, break out of loop.
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS:
# Attributes always exist as defined by ATTRIBUTES.
if getattr(self, id_field) is not None:
# Matched id attribute, break out of loop.
mapping_type = id_field
break
else:
# If no mapping type is found raise exception.
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
# Make sure that none of the set attributes on this handler
# are not allowed for the discovered handler type.
for attribute in self.ATTRIBUTES:
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
# Also check that static file map has 'upload'.
# NOTE: Add REQUIRED_FIELDS along with ALLOWED_FIELDS if any more
# exceptional cases arise.
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure a handler has correct fields.
In addition to normal `ValidatedCheck`, this method calls `GetHandlerType`,
which validates whether all of the handler fields are configured properly.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: If `mime_type` is inconsistent with
`http_headers`.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
# re how headers that affect caching interact per RFC 2616:
#
# Section 13.1.3 says that when there is "apparent conflict between
# [Cache-Control] header values, the most restrictive interpretation is
# applied".
#
# Section 14.21 says that Cache-Control: max-age overrides Expires
# headers.
#
# Section 14.32 says that Pragma: no-cache has no meaning in responses;
# therefore, we do not need to be concerned about that header here.
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that `self.http_headers` is consistent with `self.mime_type`.
    This method assumes that `self` is a static handler; that is, either
    `self.static_dir` or `self.static_files` is set (not `None`).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: If `self.http_headers`
contains a `Content-Type` header, and `self.mime_type` is set. For
example, the following configuration would be rejected::
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
`http_headers` and `mime_type` specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Forces omitted `secure` handler fields to be set to 'secure: optional'.
    The effect is that `handler.secure` is never left equal to the nominal
    default, `SECURE_DEFAULT`.
"""
# See http://b/issue?id=2073962.
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See the `version element documentation`_ to learn which URLs are reserved.
.. _`version element documentation`:
https://cloud.google.com/appengine/docs/python/config/appref#syntax
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: If the `position` attribute is specified for
an `app.yaml` file instead of an `include.yaml` file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
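# Illustrative sketch (not part of the original module), assuming that
# validation.Validated accepts attribute values as keyword arguments:
#
#   handler = URLMap(url='/images/(.*)',
#                    static_files=r'images_folder/\1',
#                    upload='images_folder/(.*)')
#   handler.GetHandlerType()   # -> HANDLER_STATIC_FILES
#   handler.GetHandler()       # -> 'images_folder/\1'
#
# Omitting `upload` would make GetHandlerType() raise MissingHandlerAttribute,
# and setting a second handler-id attribute (for example `script`) is rejected
# during the same validation pass.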
class AdminConsolePage(validation.Validated):
"""Class representing the admin console page in an `AdminConsole` object."""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing an admin console directives in application info."""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Returns the result of merging two `AdminConsole` objects."""
# Right now this method only needs to worry about the pages attribute of
# `AdminConsole`. However, since this object is valid as part of an
# `AppInclude` object, any objects added to `AdminConsole` in the future
# must also be merged. Rather than burying the merge logic in the process
# of merging two `AppInclude` objects, it is centralized here. If you modify
# the `AdminConsole` object to support other objects, you must also modify
# this method to support merging those additional objects.
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
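# Minimal merge sketch (illustrative; the URL and name values are made up and
# must satisfy _URL_REGEX and _PAGE_NAME_REGEX):
#
#   a = AdminConsole(pages=[AdminConsolePage(url='/one', name='One')])
#   b = AdminConsole(pages=[AdminConsolePage(url='/two', name='Two')])
#   AdminConsole.Merge(a, b)     # -> `a`, whose pages now list both entries
#   AdminConsole.Merge(None, b)  # -> `b`, the only non-None argument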
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info."""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing built-in handler directives in application info.
This class permits arbitrary keys, but their values must be described by the
`validation.Options` object that is returned by `ATTRIBUTES`.
"""
# `Validated` is a somewhat complicated class. It actually maintains two
# dictionaries: the `ATTRIBUTES` dictionary and an internal `__dict__` object
# that maintains key value pairs.
#
# The normal flow is that a key must exist in `ATTRIBUTES` in order to be able
# to be inserted into `__dict__`. So that's why we force the
# `ATTRIBUTES.__contains__` method to always return `True`; we want to accept
# any attribute. Once the method returns `True`, then its value will be
# fetched, which returns `ATTRIBUTES[key]`; that's why we override
# `ATTRIBUTES.__getitem__` to return the validator for a `BuiltinHandler`
# object.
#
# This is where it gets tricky. Once the validator object is returned, then
# `__dict__[key]` is set to the validated object for that key. However, when
# `CheckInitialized()` is called, it uses iteritems from `ATTRIBUTES` in order
# to generate a list of keys to validate. This expects the `BuiltinHandler`
# instance to contain every item in `ATTRIBUTES`, which contains every
# built-in name seen so far by any `BuiltinHandler`. To work around this,
# `__getattr__` always returns `None` for public attribute names. Note that
# `__getattr__` is only called if `__dict__` does not contain the key. Thus,
  # only the single built-in value that was actually set is validated.
#
# What's important to know is that in this implementation, only the keys in
# `ATTRIBUTES` matter, and only the values in `__dict__` matter. The values in
# `ATTRIBUTES` and the keys in `__dict__` are both ignored. The key in
# `__dict__` is only used for the `__getattr__` function, but to find out what
# keys are available, only `ATTRIBUTES` is ever read.
class DynamicAttributes(dict):
"""Provides a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any `get` operation. The fixed
value that you pass in as a constructor parameter should be a
`validation.Validated` object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensures all BuiltinHandler objects at least use the `default` attribute.
Args:
**attributes: The attributes that you want to use.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Allows `ATTRIBUTES.iteritems()` to return set of items that have values.
Whenever `validate` calls `iteritems()`, it is always called on
`ATTRIBUTES`, not on `__dict__`, so this override is important to ensure
that functions such as `ToYAML()` return the correct set of keys.
Args:
key: The key for the `iteritem` that you want to set.
value: The value for the `iteritem` that you want to set.
Raises:
MultipleBuiltinsSpecified: If more than one built-in is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
      # Only the name of a built-in handler is currently allowed as an
      # attribute, so the object can only be set once. If attributes of a
      # different form are desired later, this clause is where to catch any
      # setting that does not match a predefined attribute name.
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
# `__getattr__` is only called for attributes that don't exist in the
# instance dictionary.
raise AttributeError
return None
def GetUnnormalized(self, key):
try:
return super(BuiltinHandler, self).GetUnnormalized(key)
except AttributeError:
return getattr(self, key)
def ToDict(self):
"""Converts a `BuiltinHander` object to a dictionary.
Returns:
A dictionary in `{builtin_handler_name: on/off}` form
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Finds if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: A list of `BuiltinHandler` objects, typically
`yaml.builtins`.
      builtin_name: The name of the built-in whose presence you want to check.
Returns:
`True` if `builtin_name` is defined by a member of `builtins_list`; all
other results return `False`.
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of `BuiltinHandler` objects.
Args:
      builtins_list: A list of `BuiltinHandler` objects to convert to tuples.
Returns:
A list of `(name, status)` that is derived from the `BuiltinHandler`
objects.
"""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verifies that all `BuiltinHandler` objects are valid and not repeated.
Args:
builtins_list: A list of `BuiltinHandler` objects to validate.
runtime: If you specify this argument, warnings are generated for
built-ins that have been deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: If the name of a `BuiltinHandler` object cannot be
determined.
DuplicateBuiltinsSpecified: If a `BuiltinHandler` name is used more than
once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
# This checking must be done here rather than in `apphosting/ext/builtins`
      # because `apphosting/ext/builtins` cannot differentiate between
# built-ins specified in `app.yaml` versus ones added in a built-in
# include. There is a hole here where warnings are not generated for
# deprecated built-ins that appear in user-created include files.
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
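# Illustrative sketch (not part of the original module): a builtins stanza such
# as
#
#   builtins:
#   - deferred: on
#   - appstats: off
#
# parses into BuiltinHandler objects whose `builtin_name` is the single key.
# Assuming `config.builtins` holds that list:
#
#   BuiltinHandler.IsDefined(config.builtins, 'deferred')  # -> True
#   BuiltinHandler.ListToTuples(config.builtins)
#   # -> [('deferred', 'on'), ('appstats', 'off')], with values normalized by
#   #    validation.Options to the ON/OFF constants
#   BuiltinHandler.Validate(config.builtins)  # raises if a name repeats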
class ApiConfigHandler(HandlerBase):
"""Class representing `api_config` handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
# Make `URL` and `SCRIPT` required for `api_config` stanza
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Determines if the library configuration is not valid.
Raises:
appinfo_errors.InvalidLibraryName: If the specified library is not
supported.
appinfo_errors.InvalidLibraryVersion: If the specified library version is
not supported.
"""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version == 'latest':
self.version = supported_library.latest_version
elif self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
use_vers = '", "'.join(supported_library.non_deprecated_versions)
logging.warning(
'%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT,
self.name,
self.version,
use_vers)
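# Illustrative sketch (not part of the original module); 'django' is assumed to
# be a key of _NAME_TO_SUPPORTED_LIBRARY:
#
#   lib = Library(name='django', version='latest')
#   lib.CheckInitialized()  # rewrites lib.version to the latest supported one
#   Library(name='no-such-lib', version='1.0').CheckInitialized()
#   # raises InvalidLibraryName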
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxsize)),
}
class CustomMetric(validation.Validated):
"""Class representing CustomMetrics in AppInfoExternal."""
ATTRIBUTES = {
METRIC_NAME: validation.Regex(_NON_WHITE_SPACE_REGEX),
TARGET_TYPE: validation.Regex(TARGET_TYPE_REGEX),
CUSTOM_METRIC_UTILIZATION: validation.Optional(validation.TYPE_FLOAT),
SINGLE_INSTANCE_ASSIGNMENT: validation.Optional(validation.TYPE_FLOAT),
FILTER: validation.Optional(validation.TYPE_STR),
}
def CheckInitialized(self):
"""Determines if the CustomMetric is not valid.
Raises:
appinfo_errors.TooManyAutoscalingUtilizationTargetsError: If too many
scaling targets are set.
appinfo_errors.NotEnoughAutoscalingUtilizationTargetsError: If no scaling
targets are set.
"""
super(CustomMetric, self).CheckInitialized()
if bool(self.target_utilization) and bool(self.single_instance_assignment):
raise appinfo_errors.TooManyAutoscalingUtilizationTargetsError(
("There may be only one of '%s' or '%s'." % CUSTOM_METRIC_UTILIZATION,
SINGLE_INSTANCE_ASSIGNMENT))
elif not (bool(self.target_utilization) or
bool(self.single_instance_assignment)):
raise appinfo_errors.NotEnoughAutoscalingUtilizationTargetsError(
("There must be one of '%s' or '%s'." % CUSTOM_METRIC_UTILIZATION,
SINGLE_INSTANCE_ASSIGNMENT))
class EndpointsApiService(validation.Validated):
"""Class representing EndpointsApiService in AppInfoExternal."""
ATTRIBUTES = {
ENDPOINTS_NAME:
validation.Regex(_NON_WHITE_SPACE_REGEX),
ROLLOUT_STRATEGY:
validation.Optional(
validation.Options(ROLLOUT_STRATEGY_FIXED,
ROLLOUT_STRATEGY_MANAGED)),
CONFIG_ID:
validation.Optional(_NON_WHITE_SPACE_REGEX),
TRACE_SAMPLING:
validation.Optional(validation.TYPE_BOOL),
}
def CheckInitialized(self):
"""Determines if the Endpoints API Service is not valid.
Raises:
appinfo_errors.MissingEndpointsConfigId: If the config id is missing when
the rollout strategy is unspecified or set to "fixed".
appinfo_errors.UnexpectedEndpointsConfigId: If the config id is set when
the rollout strategy is "managed".
"""
super(EndpointsApiService, self).CheckInitialized()
if (self.rollout_strategy != ROLLOUT_STRATEGY_MANAGED and
self.config_id is None):
raise appinfo_errors.MissingEndpointsConfigId(
'config_id must be specified when rollout_strategy is unspecified or'
' set to "fixed"')
elif (self.rollout_strategy == ROLLOUT_STRATEGY_MANAGED and
self.config_id is not None):
raise appinfo_errors.UnexpectedEndpointsConfigId(
'config_id is forbidden when rollout_strategy is set to "managed"')
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES:
validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES:
validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY:
validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY:
validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST:
validation.Optional(_CONCURRENT_REQUESTS_REGEX),
# Attributes for VM-based AutomaticScaling.
MIN_NUM_INSTANCES:
validation.Optional(validation.Range(1, sys.maxsize)),
MAX_NUM_INSTANCES:
validation.Optional(validation.Range(1, sys.maxsize)),
COOL_DOWN_PERIOD_SEC:
validation.Optional(validation.Range(60, sys.maxsize, int)),
CPU_UTILIZATION:
validation.Optional(CpuUtilization),
STANDARD_MAX_INSTANCES:
validation.Optional(validation.TYPE_INT),
STANDARD_MIN_INSTANCES:
validation.Optional(validation.TYPE_INT),
STANDARD_TARGET_CPU_UTILIZATION:
validation.Optional(validation.TYPE_FLOAT),
STANDARD_TARGET_THROUGHPUT_UTILIZATION:
validation.Optional(validation.TYPE_FLOAT),
TARGET_NETWORK_SENT_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_NETWORK_SENT_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_DISK_WRITE_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_DISK_WRITE_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_DISK_READ_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_DISK_READ_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_REQUEST_COUNT_PER_SEC:
validation.Optional(validation.Range(1, sys.maxsize)),
TARGET_CONCURRENT_REQUESTS:
validation.Optional(validation.Range(1, sys.maxsize)),
CUSTOM_METRICS: validation.Optional(validation.Repeated(CustomMetric)),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class RuntimeConfig(validation.ValidatedDict):
"""Class for "vanilla" runtime configuration.
Fields used vary by runtime, so validation is delegated to the per-runtime
build processes.
These are intended to be used during Dockerfile generation, not after VM boot.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
The settings are not further validated here. The settings are validated on
the server side.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
"""Merges two `VmSettings` instances.
If a variable is specified by both instances, the value from
`vm_settings_one` is used.
Args:
vm_settings_one: The first `VmSettings` instance, or `None`.
vm_settings_two: The second `VmSettings` instance, or `None`.
Returns:
The merged `VmSettings` instance, or `None` if both input instances are
`None` or empty.
"""
# Note that `VmSettings.copy()` results in a dict.
result_vm_settings = (vm_settings_two or {}).copy()
# TODO(user): Apply merge logic when feature is fully defined.
# For now, we will merge the two dict and `vm_settings_one` will win
# if key collides.
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
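# Merge precedence sketch (illustrative key/value pairs): on a key conflict the
# *first* argument wins, unlike EnvironmentVariables.Merge below.
#
#   one = VmSettings(vm_runtime='python27')
#   two = VmSettings(vm_runtime='go', machine_type='n1-standard-1')
#   VmSettings.Merge(one, two)
#   # -> VmSettings with vm_runtime='python27', machine_type='n1-standard-1'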
class BetaSettings(VmSettings):
"""Class for Beta (internal or unreleased) settings.
This class is meant to replace `VmSettings` eventually.
Note:
All new beta settings must be registered in `shared_constants.py`.
These settings are not validated further here. The settings are validated on
the server side.
"""
@classmethod
def Merge(cls, beta_settings_one, beta_settings_two):
"""Merges two `BetaSettings` instances.
Args:
beta_settings_one: The first `BetaSettings` instance, or `None`.
beta_settings_two: The second `BetaSettings` instance, or `None`.
Returns:
The merged `BetaSettings` instance, or `None` if both input instances are
`None` or empty.
"""
merged = VmSettings.Merge(beta_settings_one, beta_settings_two)
return BetaSettings(**merged.ToDict()) if merged else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key/value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges two `EnvironmentVariables` instances.
If a variable is specified by both instances, the value from
`env_variables_two` is used.
Args:
env_variables_one: The first `EnvironmentVariables` instance or `None`.
env_variables_two: The second `EnvironmentVariables` instance or `None`.
Returns:
The merged `EnvironmentVariables` instance, or `None` if both input
instances are `None` or empty.
"""
# Note that `EnvironmentVariables.copy()` results in a dict.
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
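# Merge precedence sketch (illustrative key/value pairs): here the *second*
# argument wins on a conflict, the opposite of VmSettings.Merge above.
#
#   base = EnvironmentVariables(LOG_LEVEL='info')
#   override = EnvironmentVariables(LOG_LEVEL='debug', REGION='us-central')
#   EnvironmentVariables.Merge(base, override)
#   # -> EnvironmentVariables with LOG_LEVEL='debug', REGION='us-central'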
def ValidateSourceReference(ref):
"""Determines if a source reference is valid.
Args:
ref: A source reference in the following format:
`[repository_uri#]revision`.
Raises:
ValidationError: If the reference is malformed.
"""
repo_revision = ref.split('#', 1)
revision_id = repo_revision[-1]
if not re.match(SOURCE_REVISION_RE_STRING, revision_id):
raise validation.ValidationError('Bad revision identifier: %s' %
revision_id)
if len(repo_revision) == 2:
uri = repo_revision[0]
if not re.match(SOURCE_REPO_RE_STRING, uri):
raise validation.ValidationError('Bad repository URI: %s' % uri)
def ValidateCombinedSourceReferencesString(source_refs):
"""Determines if `source_refs` contains a valid list of source references.
Args:
source_refs: A multi-line string containing one source reference per line.
Raises:
ValidationError: If the reference is malformed.
"""
if len(source_refs) > SOURCE_REFERENCES_MAX_SIZE:
raise validation.ValidationError(
'Total source reference(s) size exceeds the limit: %d > %d' % (
len(source_refs), SOURCE_REFERENCES_MAX_SIZE))
for ref in source_refs.splitlines():
ValidateSourceReference(ref.strip())
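# Hedged examples of the `[repository_uri#]revision` format validated above;
# whether a particular value is accepted depends on SOURCE_REVISION_RE_STRING
# and SOURCE_REPO_RE_STRING, which are defined elsewhere in this module:
#
#   ValidateSourceReference('2f7b5d2a9c0e4f6a8b1c3d5e7f9a0b2c4d6e8f01')
#   ValidateSourceReference(
#       'https://source.developers.google.com/p/proj/r/default#deadbeef')
#   ValidateCombinedSourceReferencesString('deadbeef\ncafef00d')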
class HealthCheck(validation.Validated):
"""Class representing the health check configuration."""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
UNHEALTHY_THRESHOLD: validation.Optional(
validation.Range(0, sys.maxsize)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
HOST: validation.Optional(validation.TYPE_STR)}
class LivenessCheck(validation.Validated):
"""Class representing the liveness check configuration."""
ATTRIBUTES = {
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
FAILURE_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
SUCCESS_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
INITIAL_DELAY_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
PATH: validation.Optional(validation.TYPE_STR),
HOST: validation.Optional(validation.TYPE_STR)}
class ReadinessCheck(validation.Validated):
"""Class representing the readiness check configuration."""
ATTRIBUTES = {
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxsize)),
APP_START_TIMEOUT_SEC: validation.Optional(
validation.Range(0, sys.maxsize)),
FAILURE_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
SUCCESS_THRESHOLD: validation.Optional(validation.Range(0, sys.maxsize)),
PATH: validation.Optional(validation.TYPE_STR),
HOST: validation.Optional(validation.TYPE_STR)}
class VmHealthCheck(HealthCheck):
"""Class representing the configuration of the VM health check.
Note:
This class is deprecated and will be removed in a future release. Use
`HealthCheck` instead.
"""
pass
class Volume(validation.Validated):
"""Class representing the configuration of a volume."""
ATTRIBUTES = {
VOLUME_NAME: validation.TYPE_STR,
SIZE_GB: validation.TYPE_FLOAT,
VOLUME_TYPE: validation.TYPE_STR,
}
class Resources(validation.Validated):
"""Class representing the configuration of VM resources."""
ATTRIBUTES = {
CPU: validation.Optional(validation.TYPE_FLOAT),
MEMORY_GB: validation.Optional(validation.TYPE_FLOAT),
DISK_SIZE_GB: validation.Optional(validation.TYPE_INT),
VOLUMES: validation.Optional(validation.Repeated(Volume))
}
class Network(validation.Validated):
"""Class representing the VM network configuration."""
ATTRIBUTES = {
# A list of port mappings in the form 'port' or 'external:internal'.
FORWARDED_PORTS: validation.Optional(validation.Repeated(validation.Regex(
'[0-9]+(:[0-9]+)?(/(udp|tcp))?'))),
INSTANCE_TAG: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
NETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SUBNETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SESSION_AFFINITY:
validation.Optional(bool)
}
class VpcAccessConnector(validation.Validated):
"""Class representing the VPC Access connector configuration."""
ATTRIBUTES = {
VPC_ACCESS_CONNECTOR_NAME:
validation.Regex(VPC_ACCESS_CONNECTOR_NAME_REGEX),
}
class AppInclude(validation.Validated):
"""Class representing the contents of an included `app.yaml` file.
This class is used for both `builtins` and `includes` directives.
"""
# TODO(user): It probably makes sense to have a scheme where we do a
# deep-copy of fields from `AppInfoExternal` when setting the `ATTRIBUTES`
# here. Right now it's just copypasta.
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
# TODO(user): add `LIBRARIES` here when we have a good story for
# handling contradictory library requests.
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of `<manual_scaling.instances>` from the arguments.
`appinclude_one` is mutated to be the merged result in this process.
Also, this function must be updated if `ManualScaling` gets additional
fields.
Args:
appinclude_one: The first object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
appinclude_two: The second object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
Returns:
An object that is the result of merging
`appinclude_one.manual_scaling.instances` and
`appinclude_two.manual_scaling.instances`; this is returned as a revised
`appinclude_one` object after the mutations are complete.
"""
def _Instances(appinclude):
"""Determines the number of `manual_scaling.instances` sets.
Args:
appinclude: The include for which you want to determine the number of
`manual_scaling.instances` sets.
Returns:
The number of instances as an integer, or `None`.
"""
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
# We only want to mutate a param if at least one of the given
# arguments has manual_scaling.instances set.
if _Instances(appinclude_one) or _Instances(appinclude_two):
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations.
Args:
one: The first object that you want to merge.
two: The second object that you want to merge.
Returns:
An updated `one` object containing all merged data.
"""
# Merge `ManualScaling`.
AppInclude.MergeManualScaling(one, two)
# Merge `AdminConsole` objects.
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
# Preserve the specific value of `one.vm` (`None` or `False`) when neither
# are `True`.
one.vm = two.vm or one.vm
# Merge `VmSettings` objects.
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
# Merge `BetaSettings` objects.
if hasattr(one, 'beta_settings'):
one.beta_settings = BetaSettings.Merge(one.beta_settings,
two.beta_settings)
# Merge `EnvironmentVariables` objects. The values in `two.env_variables`
# override the ones in `one.env_variables` in case of conflict.
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""Merges an `app.yaml` file with referenced builtins/includes.
Args:
appyaml: The `app.yaml` file that you want to update with `appinclude`.
appinclude: The includes that you want to merge into `appyaml`.
Returns:
An updated `app.yaml` file that includes the directives you specified in
`appinclude`.
"""
# All merge operations should occur in this function or in functions
# referenced from this one. That makes it much easier to understand what
# goes wrong when included files are not merged correctly.
if not appinclude:
return appyaml
# Merge handlers while paying attention to `position` attribute.
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
# Get rid of the `position` attribute since we no longer need it, and is
# technically invalid to include in the resulting merged `app.yaml` file
# that will be sent when deploying the application.
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
appyaml.NormalizeVmSettings()
return appyaml
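  # Ordering sketch for the merge above (illustrative): if app.yaml already has
  # handlers [A] and the include provides H (position 'head' or unset) and T
  # (position 'tail'), the merged list is [H, A, T], and both H.position and
  # T.position are cleared to None before deployment.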
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""Merges the non-referential state of the provided `AppInclude`.
That is, `builtins` and `includes` directives are not preserved, but any
static objects are copied into an aggregate `AppInclude` object that
preserves the directives of both provided `AppInclude` objects.
`appinclude_one` is updated to be the merged result in this process.
Args:
appinclude_one: First `AppInclude` to merge.
appinclude_two: Second `AppInclude` to merge.
Returns:
`AppInclude` object that is the result of merging the static directives of
`appinclude_one` and `appinclude_two`. An updated version of
`appinclude_one` is returned.
"""
# If one or both `appinclude` objects were `None`, return the object that
# was not `None` or return `None`.
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
# Now, both `appincludes` are non-`None`.
# Merge handlers.
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
"""Merges two `skip_files` directives.
Args:
skip_files_one: The first `skip_files` element that you want to merge.
skip_files_two: The second `skip_files` element that you want to merge.
Returns:
A list of regular expressions that are merged.
"""
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
    # Note: the call above exploits the handling of RegexStr, where regex
    # properties can be specified as a list of regexes that are then joined
    # with |.
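# Skip-files merge sketch (illustrative; the exact merged representation is
# whatever validation.RegexStr().Validate produces for a list of patterns):
#
#   AppInclude.MergeSkipFiles(SKIP_NO_FILES, r'^a/.*')  # -> '^a/.*'
#   AppInclude.MergeSkipFiles(r'^a/.*', r'^b/.*')
#   # -> a combined pattern equivalent to '^a/.*|^b/.*'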
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a `yaml_object` builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language. For example,
you could specify `php-quercus` if this is a Java app that was generated
from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific `expiration` set.
See the documentation for the `URLMap.expiration` field for more
information.
skip_files: A regular expression object. Files that match this regular
expression will not be uploaded by `appcfg.py`. For example::
skip_files: |
.svn.*|
#.*#
nobuild_files: A regular expression object. Files that match this regular
expression will not be built into the app. This directive is valid for
Go only.
api_config: URL root and script or servlet path for enhanced API serving.
"""
ATTRIBUTES = {
# Regular expressions for these attributes are defined in
# //apphosting/base/id_util.cc.
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
# An alias for `APPLICATION`.
PROJECT: validation.Optional(APPLICATION_RE_STRING),
SERVICE: validation.Preferred(MODULE,
validation.Optional(MODULE_ID_RE_STRING)),
MODULE: validation.Deprecated(SERVICE,
validation.Optional(MODULE_ID_RE_STRING)),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: validation.Optional(RUNTIME_RE_STRING),
RUNTIME_CHANNEL: validation.Optional(validation.Type(str)),
# A new `api_version` requires a release of the `dev_appserver`, so it
# is ok to hardcode the version names here.
API_VERSION: validation.Optional(API_VERSION_RE_STRING),
# The App Engine environment to run this version in. (VM vs. non-VM, etc.)
ENV: validation.Optional(ENV_RE_STRING),
ENDPOINTS_API_SERVICE: validation.Optional(EndpointsApiService),
# The SDK will use this for generated Dockerfiles
# hasattr guard the new Exec() validator temporarily
ENTRYPOINT: validation.Optional(
validation.Exec() if hasattr(
validation, 'Exec') else validation.Type(str)),
RUNTIME_CONFIG: validation.Optional(RuntimeConfig),
INSTANCE_CLASS: validation.Optional(validation.Type(str)),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings), # Deprecated
BETA_SETTINGS: validation.Optional(BetaSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck), # Deprecated
HEALTH_CHECK: validation.Optional(HealthCheck),
RESOURCES: validation.Optional(Resources),
LIVENESS_CHECK: validation.Optional(LivenessCheck),
READINESS_CHECK: validation.Optional(ReadinessCheck),
NETWORK: validation.Optional(Network),
VPC_ACCESS_CONNECTOR: validation.Optional(VpcAccessConnector),
ZONES: validation.Optional(validation.Repeated(validation.TYPE_STR)),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
# TODO(user): change to a regex when `validation.Repeated` supports it
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
STANDARD_WEBSOCKET: validation.Optional(bool),
}
def CheckInitialized(self):
"""Performs non-regular expression-based validation.
The following are verified:
- At least one URL mapping is provided in the URL mappers.
- The number of URL mappers doesn't exceed `MAX_URL_MAPS`.
- The major version does not contain the string `-dot-`.
- If `api_endpoints` are defined, an `api_config` stanza must be
defined.
- If the `runtime` is `python27` and `threadsafe` is set, then no CGI
handlers can be used.
- The version name doesn't start with `BUILTIN_NAME_PREFIX`.
- If `redirect_http_response_code` exists, it is in the list of valid
300s.
- Module and service aren't both set. Services were formerly known as
modules.
Raises:
DuplicateLibrary: If `library_name` is specified more than once.
MissingURLMapping: If no `URLMap` object is present in the object.
TooManyURLMappings: If there are too many `URLMap` entries.
MissingApiConfig: If `api_endpoints` exists without an `api_config`.
MissingThreadsafe: If `threadsafe` is not set but the runtime requires it.
ThreadsafeWithCgiHandler: If the `runtime` is `python27`, `threadsafe` is
set and CGI handlers are specified.
TooManyScalingSettingsError: If more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: If the libraries clause is used for a
runtime that does not support it, such as `python25`.
"""
super(AppInfoExternal, self).CheckInitialized()
if self.runtime is None and not self.IsVm():
raise appinfo_errors.MissingRuntimeError(
'You must specify a "runtime" field for non-vm applications.')
elif self.runtime is None:
# Default optional to custom (we don't do that in attributes just so
# we know that it's been defaulted)
self.runtime = 'custom'
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
vm_runtime_python27 = (
self.runtime == 'vm' and
(hasattr(self, 'vm_settings') and
self.vm_settings and
self.vm_settings.get('vm_runtime') == 'python27') or
(hasattr(self, 'beta_settings') and
self.beta_settings and
self.beta_settings.get('vm_runtime') == 'python27'))
if (self.threadsafe is None and
(self.runtime == 'python27' or vm_runtime_python27)):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to a true or false YAML value')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if (hasattr(self, 'beta_settings') and self.beta_settings
and self.beta_settings.get('source_reference')):
ValidateCombinedSourceReferencesString(
self.beta_settings.get('source_reference'))
if self.libraries:
if not (vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
# VMEngines can handle python25 handlers, so we don't include
# vm_runtime_python27 in the if statement above.
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all `Library` instances active for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries as well as any required
dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized `Library` instances for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries, their required dependencies, and
any libraries enabled by default. Any libraries with `latest` as their
version will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the `AppInfoExternal`.
Backend entries can contain directives that modify other parts of the
`app.yaml` file, such as the `start` directive, which adds a handler for the
start request. This method performs those modifications.
Args:
backend_name: The name of a backend that is defined in the `backends`
directive.
Raises:
BackendNotFound: If the indicated backend was not listed in the
`backends` directive.
DuplicateBackend: If the backend is found more than once in the `backends`
directive.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying `vm_runtime`.
Returns:
The effective runtime: The value of `beta/vm_settings.vm_runtime` if
`runtime` is `vm`, or `runtime` otherwise.
"""
if (self.runtime == 'vm' and hasattr(self, 'vm_settings')
and self.vm_settings is not None):
return self.vm_settings.get('vm_runtime')
if (self.runtime == 'vm' and hasattr(self, 'beta_settings')
and self.beta_settings is not None):
return self.beta_settings.get('vm_runtime')
return self.runtime
def SetEffectiveRuntime(self, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
runtime: The runtime to use.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
# Patch up vm runtime setting. Copy `runtime` to `vm_runtime` and set
# runtime to the string `vm`.
self.vm_settings['vm_runtime'] = runtime
self.runtime = 'vm'
else:
self.runtime = runtime
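  # A minimal sketch of how the two accessors above interact (hypothetical
  # values): for an app parsed with `runtime: vm` and
  # `vm_settings: {vm_runtime: python27}`, GetEffectiveRuntime() returns
  # 'python27'; calling SetEffectiveRuntime('go') stores 'go' in
  # vm_settings['vm_runtime'] while keeping self.runtime == 'vm'.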
def NormalizeVmSettings(self):
"""Normalizes VM settings."""
# NOTE(user): In the input files, `vm` is not a type of runtime, but
# rather is specified as `vm: true|false`. In the code, `vm` is represented
# as a value of `AppInfoExternal.runtime`.
# NOTE(user): This hack is only being applied after the parsing of
# `AppInfoExternal`. If the `vm` attribute can ever be specified in the
# `AppInclude`, then this processing will need to be done there too.
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
if 'vm_runtime' not in self.vm_settings:
self.SetEffectiveRuntime(self.runtime)
# Copy fields that are automatically added by the SDK or this class
# to `beta_settings`.
if hasattr(self, 'beta_settings') and self.beta_settings:
# Only copy if `beta_settings` already exists, because we have logic in
# `appversion.py` to discard all of `vm_settings` if anything is in
# `beta_settings`. So we won't create an empty one just to add these
# fields.
for field in ['vm_runtime',
'has_docker_image',
'image',
'module_yaml_path']:
if field not in self.beta_settings and field in self.vm_settings:
self.beta_settings[field] = self.vm_settings[field]
# TODO(user): `env` replaces `vm`. Remove `vm` when field is removed.
def IsVm(self):
return (self.vm or
self.env in ['2', 'flex', 'flexible'])
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (`URLMap`) objects.
Args:
    handlers: A list of handler (`URLMap`) objects.
is_include_file: If this argument is set to `True`, the handlers that are
added as part of the `includes` directive are validated.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Loads a single `AppInfo` object where one and only one is expected.
This method validates that the values in the `AppInfo` match the
validators that are defined in this file, in particular,
`AppInfoExternal.ATTRIBUTES`.
Args:
app_info: A file-like object or string. If the argument is a string, the
argument is parsed as a configuration file. If the argument is a
file-like object, the data is read, then parsed.
Returns:
An instance of `AppInfoExternal` as loaded from a YAML file.
Raises:
ValueError: If a specified service is not valid.
    EmptyConfigurationFile: If there are no documents in the YAML file.
MultipleConfigurationFile: If more than one document exists in the YAML
file.
DuplicateBackend: If a backend is found more than once in the `backends`
directive.
yaml_errors.EventError: If the `app.yaml` file fails validation.
appinfo_errors.MultipleProjectNames: If the `app.yaml` file has both an
`application` directive and a `project` directive.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
# Allow `project: name` as an alias for `application: name`. If found, we
# change the `project` field to `None`. (Deleting it would make a distinction
# between loaded and constructed `AppInfoExternal` objects, since the latter
# would still have the project field.)
if appyaml.application and appyaml.project:
raise appinfo_errors.MultipleProjectNames(
'Specify one of "application: name" or "project: name"')
elif appyaml.project:
appyaml.application = appyaml.project
appyaml.project = None
appyaml.NormalizeVmSettings()
return appyaml
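# A minimal usage sketch (assumption: 'app.yaml' is a valid single-document
# configuration file on disk):
#
#   with open('app.yaml') as f:
#       appyaml = LoadSingleAppInfo(f)
#   print(appyaml.application, appyaml.GetEffectiveRuntime())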
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
This class is used to pass back information about the newly created app to
users after a new version has been created.
"""
# NOTE(user): Before you consider adding anything to this YAML definition,
# you must solve the issue that old SDK versions will try to parse this new
# value with the old definition and fail. Basically we are stuck with this
# definition for the time being. The parsing of the value is done in
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Loads a single `AppInclude` object where one and only one is expected.
Args:
    app_include: A file-like object or string. If the argument is a string,
      it is parsed as a configuration file. If the argument is a file-like
      object, the data is read, then parsed.
Returns:
An instance of `AppInclude` as loaded from a YAML file.
Raises:
EmptyConfigurationFile: If there are no documents in the YAML file.
MultipleConfigurationFile: If there is more than one document in the YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches `_DELTA_REGEX`.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
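# A worked example of the delta grammar above, assuming the conventional
# second/minute/hour/day unit conversions in _EXPIRATION_CONVERSIONS:
#
#   ParseExpiration('1d 2h')  # 86400 + 2 * 3600 == 93600 seconds
#   ParseExpiration('30m')    # 30 * 60 == 1800 seconds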
#####################################################################
# These regexps must be the same as those in:
# - apphosting/api/app_config/request_validator.cc
# - java/com/google/appengine/tools/admin/AppVersionUpload.java
# - java/com/google/apphosting/admin/legacy/LegacyAppInfo.java
# LINT.IfChange
# Forbid `.`, `..`, and leading `-`, `_ah/` or `/`
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
# Forbid `//` and trailing `/`
_file_path_negative_2_re = re.compile(r'//|/$')
# Forbid any use of space other than in the middle of a directory
# or file name. Forbid line feeds and carriage returns.
_file_path_negative_3_re = re.compile(r'^ | $|/ | /|\r|\n')
# (erinjerison) Lint seems to think I'm specifying the word "character" as an
# argument. This isn't the case; it's part of a list to enable the list to
# build properly. Disabling it for now.
# pylint: disable=g-doc-args
def ValidFilename(filename):
"""Determines if a file name is valid.
Args:
filename: The file name to validate. The file name must be a valid file
name:
      - It must only contain letters, numbers, and the following special
        characters: `@`, `_`, `+`, `/`, `$`, `.`, `-`, or `~`.
      - It must not exceed 1024 characters (the limit enforced below).
- It must not contain `/./`, `/../`, or `//`.
- It must not end in `/`.
- All spaces must be in the middle of a directory or file name.
Returns:
An error string if the file name is invalid. `''` is returned if the file
name is valid.
"""
if not filename:
return 'Filename cannot be empty'
if len(filename) > 1024:
return 'Filename cannot exceed 1024 characters: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
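# A few illustrative calls (sketch only; the return values follow the checks
# above):
#
#   ValidFilename('static/css/main.css')  # -> ''
#   ValidFilename('../secrets.txt')       # -> error: contains ".."
#   ValidFilename('logs/')                # -> error: trailing '/'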
# LINT.ThenChange(
|
from django.apps import apps
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .models import Schema, Table, Column
@receiver(post_save, sender=Schema)
def schema_updated_handler(sender, **kwargs):
if apps.is_installed('daiquiri.oai'):
from daiquiri.oai.utils import update_records
update_records('schema', kwargs['instance'])
@receiver(post_delete, sender=Schema)
def schema_deleted_handler(sender, **kwargs):
if apps.is_installed('daiquiri.oai'):
from daiquiri.oai.utils import delete_records
delete_records('schema', kwargs['instance'])
@receiver(post_save, sender=Table)
def table_updated_handler(sender, **kwargs):
if apps.is_installed('daiquiri.oai'):
from daiquiri.oai.utils import update_records
update_records('table', kwargs['instance'])
@receiver(post_delete, sender=Table)
def table_deleted_handler(sender, **kwargs):
if apps.is_installed('daiquiri.oai'):
from daiquiri.oai.utils import delete_records
delete_records('table', kwargs['instance'])
@receiver(post_save, sender=Column)
def column_updated_handler(sender, **kwargs):
cache.delete('processor')
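# A sketch only (an assumption, not part of the original module): a matching
# post_delete receiver would keep the cached processor consistent when a
# Column is removed as well.
#
# @receiver(post_delete, sender=Column)
# def column_deleted_handler(sender, **kwargs):
#     cache.delete('processor')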
|
from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
interp_attrproperty, interp_attrproperty_w)
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import unwrap_spec, interp2app
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.error import OperationError
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, hlstr
from rpython.rtyper.rclass import OBJECT
#from rpython.jit.metainterp.resoperation import rop
from rpython.rlib.nonconst import NonConstant
from rpython.rlib.rarithmetic import r_uint
from rpython.rlib import jit_hooks
from rpython.rlib.jit import Counters
from rpython.rlib.objectmodel import compute_unique_id
from pypy.module.pypyjit.interp_jit import pypyjitdriver
class Cache(object):
in_recursion = False
no = 0
def __init__(self, space):
self.w_compile_hook = space.w_None
self.w_abort_hook = space.w_None
self.w_trace_too_long_hook = space.w_None
def getno(self):
self.no += 1
return self.no - 1
def wrap_greenkey(space, jitdriver, greenkey, greenkey_repr):
if greenkey is None:
return space.w_None
jitdriver_name = jitdriver.name
if jitdriver_name == 'pypyjit':
next_instr = greenkey[0].getint()
is_being_profiled = greenkey[1].getint()
ll_code = lltype.cast_opaque_ptr(lltype.Ptr(OBJECT),
greenkey[2].getref_base())
pycode = cast_base_ptr_to_instance(PyCode, ll_code)
return space.newtuple([space.wrap(pycode), space.wrap(next_instr),
space.newbool(bool(is_being_profiled))])
else:
return space.wrap(greenkey_repr)
@unwrap_spec(operations=bool)
def set_compile_hook(space, w_hook, operations=True):
""" set_compile_hook(hook, operations=True)
Set a compiling hook that will be called each time a loop is compiled.
    The hook will be called with a pypyjit.JitLoopInfo object. Refer to its
    docstring for details.
    Note that the jit hook is not reentrant: if the code inside the jit hook
    is itself jitted, it will get compiled, but the jit hook won't be called
    for it.
"""
cache = space.fromcache(Cache)
assert w_hook is not None
cache.w_compile_hook = w_hook
cache.compile_hook_with_ops = operations
cache.in_recursion = NonConstant(False)
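# A minimal usage sketch (assumption: running on PyPy with the pypyjit
# module available; `report_loop` is a hypothetical callback name):
#
#   import pypyjit
#   def report_loop(loop_info):
#       print(loop_info.jitdriver_name, loop_info.loop_no)
#   pypyjit.set_compile_hook(report_loop)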
def set_abort_hook(space, w_hook):
""" set_abort_hook(hook)
    Set a hook (callable) that will be called each time tracing is aborted
    for some reason.
    The hook will be called with the signature:
    hook(jitdriver_name, greenkey, reason, operations)
    `reason` is a string; the other arguments have the same meaning as the
    corresponding attributes of a JitLoopInfo object.
"""
cache = space.fromcache(Cache)
assert w_hook is not None
cache.w_abort_hook = w_hook
cache.in_recursion = NonConstant(False)
def set_trace_too_long_hook(space, w_hook):
""" set_trace_too_long_hook(hook)
Set a hook (callable) that will be called each time we abort
tracing because the trace is too long.
The hook will be called with the signature:
hook(jitdriver_name, greenkey)
"""
cache = space.fromcache(Cache)
assert w_hook is not None
cache.w_trace_too_long_hook = w_hook
cache.in_recursion = NonConstant(False)
def wrap_oplist(space, logops, operations, ops_offset=None):
# this function is called from the JIT
from rpython.jit.metainterp.resoperation import rop
l_w = []
jitdrivers_sd = logops.metainterp_sd.jitdrivers_sd
for op in operations:
if ops_offset is None:
ofs = -1
else:
ofs = ops_offset.get(op, 0)
num = op.getopnum()
name = op.getopname()
if num == rop.DEBUG_MERGE_POINT:
jd_sd = jitdrivers_sd[op.getarg(0).getint()]
greenkey = op.getarglist()[3:]
repr = jd_sd.warmstate.get_location_str(greenkey)
w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr)
l_w.append(DebugMergePoint(space, name,
logops.repr_of_resop(op),
jd_sd.jitdriver.name,
op.getarg(1).getint(),
op.getarg(2).getint(),
w_greenkey))
elif op.is_guard():
l_w.append(GuardOp(name, ofs, logops.repr_of_resop(op),
op.getdescr().get_jitcounter_hash()))
else:
l_w.append(WrappedOp(name, ofs, logops.repr_of_resop(op)))
return l_w
@unwrap_spec(offset=int, repr=str, name=str)
def descr_new_resop(space, w_tp, name, offset=-1, repr=''):
return WrappedOp(name, offset, repr)
@unwrap_spec(offset=int, repr=str, name=str, hash=r_uint)
def descr_new_guardop(space, w_tp, name, offset=-1, repr='', hash=r_uint(0)):
return GuardOp(name, offset, repr, hash)
@unwrap_spec(repr=str, name=str, jd_name=str, call_depth=int, call_id=int)
def descr_new_dmp(space, w_tp, name, repr, jd_name, call_depth, call_id,
w_greenkey):
return DebugMergePoint(space, name,
repr, jd_name, call_depth, call_id, w_greenkey)
class WrappedOp(W_Root):
""" A class representing a single ResOperation, wrapped nicely
"""
def __init__(self, name, offset, repr_of_resop):
self.offset = offset
self.name = name
self.repr_of_resop = repr_of_resop
def descr_repr(self, space):
return space.wrap(self.repr_of_resop)
def descr_name(self, space):
return space.wrap(self.name)
class GuardOp(WrappedOp):
def __init__(self, name, offset, repr_of_resop, hash):
WrappedOp.__init__(self, name, offset, repr_of_resop)
self.hash = hash
class DebugMergePoint(WrappedOp):
""" A class representing Debug Merge Point - the entry point
to a jitted loop.
"""
def __init__(self, space, name, repr_of_resop, jd_name, call_depth,
call_id, w_greenkey):
WrappedOp.__init__(self, name, -1, repr_of_resop)
self.jd_name = jd_name
self.call_depth = call_depth
self.call_id = call_id
self.w_greenkey = w_greenkey
def get_pycode(self, space):
if self.jd_name == pypyjitdriver.name:
return space.getitem(self.w_greenkey, space.wrap(0))
raise OperationError(space.w_AttributeError, space.wrap("This DebugMergePoint doesn't belong to the main Python JitDriver"))
def get_bytecode_no(self, space):
if self.jd_name == pypyjitdriver.name:
return space.getitem(self.w_greenkey, space.wrap(1))
raise OperationError(space.w_AttributeError, space.wrap("This DebugMergePoint doesn't belong to the main Python JitDriver"))
def get_jitdriver_name(self, space):
return space.wrap(self.jd_name)
WrappedOp.typedef = TypeDef(
'ResOperation',
__doc__ = WrappedOp.__doc__,
__new__ = interp2app(descr_new_resop),
__repr__ = interp2app(WrappedOp.descr_repr),
name = GetSetProperty(WrappedOp.descr_name),
offset = interp_attrproperty("offset", cls=WrappedOp),
)
WrappedOp.typedef.acceptable_as_base_class = False
GuardOp.typedef = TypeDef(
'GuardOp',
__doc__ = GuardOp.__doc__,
__new__ = interp2app(descr_new_guardop),
__repr__ = interp2app(GuardOp.descr_repr),
name = GetSetProperty(GuardOp.descr_name),
offset = interp_attrproperty("offset", cls=GuardOp),
hash = interp_attrproperty("hash", cls=GuardOp),
)
GuardOp.typedef.acceptable_as_base_class = False
DebugMergePoint.typedef = TypeDef(
'DebugMergePoint', WrappedOp.typedef,
__new__ = interp2app(descr_new_dmp),
__doc__ = DebugMergePoint.__doc__,
greenkey = interp_attrproperty_w("w_greenkey", cls=DebugMergePoint,
doc="Representation of place where the loop was compiled. "
"In the case of the main interpreter loop, it's a triplet "
"(code, ofs, is_profiled)"),
pycode = GetSetProperty(DebugMergePoint.get_pycode),
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no,
doc="offset in the bytecode"),
call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint,
doc="Depth of calls within this loop"),
call_id = interp_attrproperty("call_id", cls=DebugMergePoint,
doc="Number of applevel function traced in this loop"),
jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name,
doc="Name of the jitdriver 'pypyjit' in the case "
"of the main interpreter loop"),
)
DebugMergePoint.typedef.acceptable_as_base_class = False
class W_JitLoopInfo(W_Root):
""" Loop debug information
"""
w_green_key = None
bridge_no = 0
asmaddr = 0
asmlen = 0
def __init__(self, space, debug_info, is_bridge=False, wrap_ops=True):
if wrap_ops:
memo = {}
logops = debug_info.logger._make_log_operations(memo)
if debug_info.asminfo is not None:
ofs = debug_info.asminfo.ops_offset
else:
ofs = {}
ops = debug_info.operations
self.w_ops = space.newlist(wrap_oplist(space, logops, ops, ofs))
else:
self.w_ops = space.w_None
self.jd_name = debug_info.get_jitdriver().name
self.type = debug_info.type
if is_bridge:
self.bridge_no = compute_unique_id(debug_info.fail_descr)
#self.bridge_no = debug_info.fail_descr_no
self.w_green_key = space.w_None
else:
self.w_green_key = wrap_greenkey(space,
debug_info.get_jitdriver(),
debug_info.greenkey,
debug_info.get_greenkey_repr())
self.loop_no = debug_info.looptoken.number
asminfo = debug_info.asminfo
if asminfo is not None:
self.asmaddr = asminfo.asmaddr
self.asmlen = asminfo.asmlen
def descr_repr(self, space):
lgt = space.int_w(space.len(self.w_ops))
if self.type == "bridge":
code_repr = 'bridge no %d' % self.bridge_no
else:
code_repr = space.str_w(space.repr(self.w_green_key))
return space.wrap('<JitLoopInfo %s, %d operations, starting at <%s>>' %
(self.jd_name, lgt, code_repr))
def descr_get_bridge_no(self, space):
if space.is_none(self.w_green_key):
return space.wrap(self.bridge_no)
raise OperationError(space.w_TypeError, space.wrap("not a bridge"))
@unwrap_spec(loopno=int, asmaddr=int, asmlen=int, loop_no=int,
type=str, jd_name=str, bridge_no=int)
def descr_new_jit_loop_info(space, w_subtype, w_greenkey, w_ops, loopno,
asmaddr, asmlen, loop_no, type, jd_name,
bridge_no=-1):
w_info = space.allocate_instance(W_JitLoopInfo, w_subtype)
w_info.w_green_key = w_greenkey
w_info.w_ops = w_ops
w_info.asmaddr = asmaddr
w_info.asmlen = asmlen
w_info.loop_no = loop_no
w_info.type = type
w_info.jd_name = jd_name
w_info.bridge_no = bridge_no
return w_info
W_JitLoopInfo.typedef = TypeDef(
'JitLoopInfo',
__doc__ = W_JitLoopInfo.__doc__,
__new__ = interp2app(descr_new_jit_loop_info),
jitdriver_name = interp_attrproperty('jd_name', cls=W_JitLoopInfo,
doc="Name of the JitDriver, pypyjit for the main one"),
greenkey = interp_attrproperty_w('w_green_key', cls=W_JitLoopInfo,
doc="Representation of place where the loop was compiled. "
"In the case of the main interpreter loop, it's a triplet "
"(code, ofs, is_profiled)"),
operations = interp_attrproperty_w('w_ops', cls=W_JitLoopInfo, doc=
"List of operations in this loop."),
loop_no = interp_attrproperty('loop_no', cls=W_JitLoopInfo, doc=
"Loop cardinal number"),
bridge_no = GetSetProperty(W_JitLoopInfo.descr_get_bridge_no,
doc="bridge number (if a bridge)"),
type = interp_attrproperty('type', cls=W_JitLoopInfo,
doc="Loop type"),
asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo,
doc="Address of machine code"),
asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo,
doc="Length of machine code"),
__repr__ = interp2app(W_JitLoopInfo.descr_repr),
)
W_JitLoopInfo.typedef.acceptable_as_base_class = False
class W_JitInfoSnapshot(W_Root):
def __init__(self, space, w_times, w_counters, w_counter_times):
self.w_loop_run_times = w_times
self.w_counters = w_counters
self.w_counter_times = w_counter_times
W_JitInfoSnapshot.typedef = TypeDef(
"JitInfoSnapshot",
loop_run_times = interp_attrproperty_w("w_loop_run_times",
cls=W_JitInfoSnapshot),
counters = interp_attrproperty_w("w_counters",
cls=W_JitInfoSnapshot,
doc="various JIT counters"),
counter_times = interp_attrproperty_w("w_counter_times",
cls=W_JitInfoSnapshot,
doc="various JIT timers")
)
W_JitInfoSnapshot.typedef.acceptable_as_base_class = False
def get_stats_snapshot(space):
""" Get the jit status in the specific moment in time. Note that this
is eager - the attribute access is not lazy, if you need new stats
you need to call this function again.
"""
ll_times = jit_hooks.stats_get_loop_run_times(None)
w_times = space.newdict()
if ll_times:
for i in range(len(ll_times)):
w_key = space.newtuple([space.wrap(ll_times[i].type),
space.wrap(ll_times[i].number)])
space.setitem(w_times, w_key,
space.wrap(ll_times[i].counter))
w_counters = space.newdict()
for i, counter_name in enumerate(Counters.counter_names):
v = jit_hooks.stats_get_counter_value(None, i)
space.setitem_str(w_counters, counter_name, space.wrap(v))
w_counter_times = space.newdict()
tr_time = jit_hooks.stats_get_times_value(None, Counters.TRACING)
space.setitem_str(w_counter_times, 'TRACING', space.wrap(tr_time))
b_time = jit_hooks.stats_get_times_value(None, Counters.BACKEND)
space.setitem_str(w_counter_times, 'BACKEND', space.wrap(b_time))
return space.wrap(W_JitInfoSnapshot(space, w_times, w_counters,
w_counter_times))
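# A minimal usage sketch (assumption: PyPy with the pypyjit module available
# and debug counters enabled via enable_debug()):
#
#   import pypyjit
#   snapshot = pypyjit.get_stats_snapshot()
#   print(snapshot.counters)       # various JIT counters
#   print(snapshot.counter_times)  # e.g. {'TRACING': ..., 'BACKEND': ...}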
def get_stats_asmmemmgr(space):
"""Returns the raw memory currently used by the JIT backend,
as a pair (total_memory_allocated, memory_in_use)."""
m1 = jit_hooks.stats_asmmemmgr_allocated(None)
m2 = jit_hooks.stats_asmmemmgr_used(None)
return space.newtuple([space.wrap(m1), space.wrap(m2)])
def enable_debug(space):
""" Set the jit debugging - completely necessary for some stats to work,
most notably assembler counters.
"""
jit_hooks.stats_set_debug(None, True)
def disable_debug(space):
""" Disable the jit debugging. This means some very small loops will be
marginally faster and the counters will stop working.
"""
jit_hooks.stats_set_debug(None, False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Task: draw a UML activity diagram and write a program for the following
# problem: a month number from 1 to 12 is entered from the keyboard; print
# the name of the season that month belongs to.
import sys
if __name__ == '__main__':
    n = int(input("Enter the month number: "))
    if n == 1 or n == 2 or n == 12:
        print("Winter")
    elif n == 3 or n == 4 or n == 5:
        print("Spring")
    elif n == 6 or n == 7 or n == 8:
        print("Summer")
    elif n == 9 or n == 10 or n == 11:
        # The original fell through to the error branch for autumn months;
        # months 9-11 are a valid season, not an error.
        print("Autumn")
    else:
        print("Error: the month number must be between 1 and 12.",
              file=sys.stderr)
        sys.exit(1)
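# An alternative sketch using a lookup table instead of chained conditionals
# (assumption: same input contract, month numbers 1 through 12):
#
#   SEASONS = {12: "Winter", 1: "Winter", 2: "Winter",
#              3: "Spring", 4: "Spring", 5: "Spring",
#              6: "Summer", 7: "Summer", 8: "Summer",
#              9: "Autumn", 10: "Autumn", 11: "Autumn"}
#   print(SEASONS.get(n, "Error!"))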
|