repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
hetajen/vnpy161 | vn.trader/ctaStrategy/ctaBacktesting.py | 1 | 40890 | # encoding: UTF-8
'''
本文件中包含的是CTA模块的回测引擎,回测引擎的API和CTA引擎一致,
可以使用和实盘相同的代码进行回测。
History
<id> <author> <description>
2017051200 hetajen 样例:策略回测和优化
'''
from __future__ import division
'''2017051200 Add by hetajen begin'''
import time
'''2017051200 Add by hetajen end'''
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import pymongo
from ctaBase import *
from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from vtFunction import loadMongoSetting
########################################################################
class BacktestingEngine(object):
"""
CTA backtesting engine.
The function interface is identical to the live CTA strategy engine, so
the same strategy code can be moved from backtesting to live trading.
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
# counter for locally generated stop-order IDs
self.stopOrderCount = 0
# stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
# local stop-order dictionaries
# key is stopOrderID, value is the StopOrder object
self.stopOrderDict = {} # cancelled stop orders are NOT removed from this dict
self.workingStopOrderDict = {} # cancelled stop orders ARE removed from this dict
# engine type marker: backtesting
self.engineType = ENGINETYPE_BACKTESTING
# backtest configuration
self.strategy = None # strategy instance under test
self.mode = self.BAR_MODE # replay mode, bar (K-line) data by default
self.startDate = ''
self.initDays = 0
self.endDate = ''
self.slippage = 0 # assumed slippage per trade
self.rate = 0 # assumed commission rate (percentage-based commission)
self.size = 1 # contract multiplier, defaults to 1
self.priceTick = 0 # minimum price increment
self.dbClient = None # MongoDB client
self.dbCursor = None # MongoDB query cursor
#self.historyData = [] # list of historical data for backtesting
self.initData = [] # data used for strategy initialization (warm-up)
#self.backtestingData = [] # data used for the replay itself
self.dbName = '' # database name holding the history data
self.symbol = '' # collection name (symbol) to replay
self.dataStartDate = None # first date of replay data, datetime object
self.dataEndDate = None # last date of replay data, datetime object
self.strategyStartDate = None # strategy start date (earlier data only initializes), datetime object
self.limitOrderDict = OrderedDict() # all limit orders ever submitted
self.workingLimitOrderDict = OrderedDict() # active limit orders, used for matching
self.limitOrderCount = 0 # limit-order ID counter
self.tradeCount = 0 # trade ID counter
self.tradeDict = OrderedDict() # all simulated trades
self.logList = [] # log records
# latest market data, used to simulate order matching
self.tick = None
self.bar = None
self.dt = None # most recent data timestamp
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
    """Set the replay start date.

    The first initDays days of data are used only to warm up (initialize)
    the strategy; trading starts after that window.
    """
    self.startDate = startDate
    self.initDays = initDays
    self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
    # the strategy goes live once the warm-up window has elapsed
    self.strategyStartDate = self.dataStartDate + timedelta(initDays)
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
    """Set the replay end date ('' means replay to the end of the data).

    The end datetime is pushed to 23:59 so the final day's bars/ticks are
    included in the data query range.
    """
    self.endDate = endDate
    if endDate:
        # BUG FIX: datetime.replace returns a NEW object; the original code
        # discarded the result, so the 23:59 adjustment never happened and
        # the last day's data was silently excluded from the backtest.
        self.dataEndDate = datetime.strptime(endDate, '%Y%m%d').replace(hour=23, minute=59)
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
    """Choose the replay granularity: BAR_MODE or TICK_MODE."""
    self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
    """Point the engine at the MongoDB database/collection holding the history data."""
    self.dbName = dbName
    self.symbol = symbol
#----------------------------------------------------------------------
def loadHistoryData(self):
"""Load historical data from MongoDB: the warm-up slice eagerly, the replay slice as a cursor."""
host, port, logging = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# choose the data class based on the replay mode
# NOTE: func is assigned but not used here; the replay dispatch happens
# in runBacktesting
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# load the warm-up (initialization) slice: [dataStartDate, strategyStartDate)
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt)
# read the cursor into a list of hydrated data objects
self.initData = [] # reset initData
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# load the replay slice, lazily via a cursor
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # data filter
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' %(initCursor.count() + self.dbCursor.count()))
#----------------------------------------------------------------------
def runBacktesting(self):
"""Run the backtest: load history, initialize/start the strategy, then replay the data."""
# pull history from MongoDB
self.loadHistoryData()
# pick the data class and push function for the replay mode
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.inited = True
self.strategy.onInit()
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
# hydrate the data object straight from the Mongo document
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
    """Process one new bar: match pending orders against it, then feed the strategy."""
    self.bar = bar
    self.dt = bar.datetime
    self.crossLimitOrder()  # match limit orders first
    self.crossStopOrder()   # then stop orders
    self.strategy.onBar(bar)  # finally push the bar into the strategy
#----------------------------------------------------------------------
def newTick(self, tick):
    """Process one new tick: match pending orders against it, then feed the strategy."""
    self.tick = tick
    self.dt = tick.datetime
    self.crossLimitOrder()
    self.crossStopOrder()
    self.strategy.onTick(tick)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
    """Instantiate the strategy under test.

    setting holds the strategy parameters; omit it to use the defaults
    declared on the strategy class.
    """
    strategy = strategyClass(self, setting)
    strategy.name = strategy.className
    self.strategy = strategy
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
    """Simulate submitting a limit order; returns the local order ID string."""
    self.limitOrderCount += 1
    orderID = str(self.limitOrderCount)

    order = VtOrderData()
    order.vtSymbol = vtSymbol
    order.price = self.roundToPriceTick(price)
    order.totalVolume = volume
    order.status = STATUS_NOTTRADED  # just submitted, nothing filled yet
    order.orderID = orderID
    order.vtOrderID = orderID
    order.orderTime = str(self.dt)

    # map the CTA order type onto exchange direction/offset
    if orderType == CTAORDER_BUY:
        order.direction, order.offset = DIRECTION_LONG, OFFSET_OPEN
    elif orderType == CTAORDER_SELL:
        order.direction, order.offset = DIRECTION_SHORT, OFFSET_CLOSE
    elif orderType == CTAORDER_SHORT:
        order.direction, order.offset = DIRECTION_SHORT, OFFSET_OPEN
    elif orderType == CTAORDER_COVER:
        order.direction, order.offset = DIRECTION_LONG, OFFSET_CLOSE

    # register the order as both historical and working
    self.workingLimitOrderDict[orderID] = order
    self.limitOrderDict[orderID] = order
    return orderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
    """Simulate cancelling a working limit order; unknown IDs are silently ignored."""
    order = self.workingLimitOrderDict.get(vtOrderID)
    if order is not None:
        order.status = STATUS_CANCELLED
        order.cancelTime = str(self.dt)
        del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
    """Simulate submitting a locally managed stop order; returns the stop-order ID."""
    self.stopOrderCount += 1
    stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)

    so = StopOrder()
    so.vtSymbol = vtSymbol
    so.price = self.roundToPriceTick(price)
    so.volume = volume
    so.strategy = strategy
    so.stopOrderID = stopOrderID
    so.status = STOPORDER_WAITING

    # map the CTA order type onto exchange direction/offset
    if orderType == CTAORDER_BUY:
        so.direction, so.offset = DIRECTION_LONG, OFFSET_OPEN
    elif orderType == CTAORDER_SELL:
        so.direction, so.offset = DIRECTION_SHORT, OFFSET_CLOSE
    elif orderType == CTAORDER_SHORT:
        so.direction, so.offset = DIRECTION_SHORT, OFFSET_OPEN
    elif orderType == CTAORDER_COVER:
        so.direction, so.offset = DIRECTION_LONG, OFFSET_CLOSE

    # keep the stop order in both the full and the working dictionaries
    self.stopOrderDict[stopOrderID] = so
    self.workingStopOrderDict[stopOrderID] = so
    return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
    """Cancel a working stop order; unknown IDs are silently ignored."""
    so = self.workingStopOrderDict.get(stopOrderID)
    if so is not None:
        so.status = STOPORDER_CANCELLED
        del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""Match all working limit orders against the latest bar/tick and push fills to the strategy."""
# determine the prices at which orders can cross
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # a buy limit above this price gets filled
sellCrossPrice = self.bar.high # a sell limit below this price gets filled
buyBestCrossPrice = self.bar.open # best possible fill for buys submitted before this bar
sellBestCrossPrice = self.bar.open # best possible fill for sells submitted before this bar
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# walk all working limit orders
# NOTE: Python 2 dict.items() returns a list, so deleting from the dict
# inside the loop is safe; under Python 3 this would need list(...items())
for orderID, order in self.workingLimitOrderDict.items():
# does the order cross?
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # domestic ticks report askPrice1 == 0 at limit-up, so buys cannot fill then
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # domestic ticks report bidPrice1 == 0 at limit-down, so sells cannot fill then
# the order traded
if buyCross or sellCross:
# push the trade record
self.tradeCount += 1 # next trade ID
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# Fill-price example for a buy:
# 1. suppose the current bar's OHLC is 100, 125, 90, 110
# 2. suppose the strategy placed a limit buy at 105 as the bar opened
# 3. the realistic fill is 100, not 105, because 100 was the best
#    available market price when the order went out
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# push the (now fully filled) order status
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# drop the order from the working dictionary
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""Match all working stop orders against the latest bar/tick; triggering rules mirror limit orders."""
# determine the crossing prices; note the comparisons are the opposite
# of the limit-order rules
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # a buy stop below this price triggers
sellCrossPrice = self.bar.low # a sell stop above this price triggers
bestCrossPrice = self.bar.open # best fill: buy stops never fill below it, sell stops never above it
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# walk all working stop orders
# NOTE: Python 2 dict.items() returns a list, so deleting inside the
# loop is safe; Python 3 would need list(...items())
for stopOrderID, so in self.workingStopOrderDict.items():
# does the stop trigger?
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# the stop triggered
if buyCross or sellCross:
# push the trade record
self.tradeCount += 1 # next trade ID
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
# a triggered stop consumes a limit-order ID for its synthetic order
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# push the synthetic, fully filled order
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
# drop the stop order from the working dictionary
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
    """No-op: a backtest never writes to the database (keeps live-trading code paths safe)."""
    pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
    """Return the preloaded warm-up bars instead of querying the database."""
    return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
    """Return the preloaded warm-up ticks instead of querying the database."""
    return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
    """Buffer a log line stamped with the current replay time."""
    self.logList.append(str(self.dt) + ' ' + content)
#----------------------------------------------------------------------
def output(self, content):
    """Print a line prefixed with the wall-clock time."""
    print(str(datetime.now()) + "\t" + content)
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
Pair the recorded trades into round trips (FIFO) and compute the
backtest statistics. Returns {} when no round trip was closed.
"""
self.output(u'计算回测结果')
# first pair entries with exits to get per-round-trip P&L
resultList = [] # closed round-trip results
longTrade = [] # open long entries, FIFO queue
shortTrade = [] # open short entries, FIFO queue
tradeTimeList = [] # timestamps of every entry/exit
posList = [0] # position marker after each fill (+1/-1/0, not the actual size)
for trade in self.tradeDict.values():
# long fill
if trade.direction == DIRECTION_LONG:
# no open shorts: this opens a long
if not shortTrade:
longTrade.append(trade)
# otherwise the long closes an open short
else:
while True:
entryTrade = shortTrade[0]
exitTrade = trade
# close as much volume as both sides allow
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
-closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([-1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# track the remaining (unclosed) volume on both sides
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# entry fully closed: drop it from the FIFO queue
if not entryTrade.volume:
shortTrade.pop(0)
# exit fully consumed: done with this trade
if not exitTrade.volume:
break
# the exit still has volume left,
if exitTrade.volume:
# and no shorts remain to close: the remainder flips
# into a new long position
if not shortTrade:
longTrade.append(exitTrade)
break
# shorts remain: keep closing on the next iteration
else:
pass
# short fill
else:
# no open longs: this opens a short
if not longTrade:
shortTrade.append(trade)
# otherwise the short closes an open long
else:
while True:
entryTrade = longTrade[0]
exitTrade = trade
# close as much volume as both sides allow
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# track the remaining (unclosed) volume on both sides
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# entry fully closed: drop it from the FIFO queue
if not entryTrade.volume:
longTrade.pop(0)
# exit fully consumed: done with this trade
if not exitTrade.volume:
break
# the exit still has volume left,
if exitTrade.volume:
# and no longs remain to close: the remainder flips
# into a new short position
if not longTrade:
shortTrade.append(exitTrade)
break
# longs remain: keep closing on the next iteration
else:
pass
# bail out if nothing was closed
if not resultList:
self.output(u'无交易结果')
return {}
# then accumulate the equity curve, drawdown and summary statistics
capital = 0 # running P&L
maxCapital = 0 # high-water mark
drawdown = 0 # current drawdown
totalResult = 0 # number of round trips
totalTurnover = 0 # total traded value (contract face value)
totalCommission = 0 # total commission paid
totalSlippage = 0 # total slippage cost
timeList = [] # exit timestamps
pnlList = [] # per-round-trip P&L series
capitalList = [] # cumulative P&L series
drawdownList = [] # drawdown series
winningResult = 0 # number of winners
losingResult = 0 # number of losers
totalWinning = 0 # gross profit
totalLosing = 0 # gross loss
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
timeList.append(result.exitDt) # a round trip is stamped with its exit time
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
# derived win/loss statistics
winningRate = winningResult/totalResult*100 # percentage of winners
averageWinning = 0 # defaults guard against division by zero below
averageLosing = 0
profitLossRatio = 0
if winningResult:
averageWinning = totalWinning/winningResult # mean profit per winner
if losingResult:
averageLosing = totalLosing/losingResult # mean loss per loser
if averageLosing:
profitLossRatio = -averageWinning/averageLosing # profit/loss ratio
# package everything into the result dict
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
d['posList'] = posList
d['tradeTimeList'] = tradeTimeList
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
    """Compute the backtest statistics, print a text summary and plot the curves."""
    d = self.calculateBacktestingResult()
    if not d:
        # calculateBacktestingResult returns {} when no round trip closed;
        # the original code crashed with a KeyError here in that case
        return
    # print summary statistics
    self.output('-' * 30)
    self.output(u'第一笔交易:\t%s' % d['timeList'][0])
    self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
    self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
    self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
    self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
    self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
    self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
    self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
    self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
    self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
    self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
    self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
    # plotting
    import matplotlib.pyplot as plt
    import numpy as np
    try:
        import seaborn as sns  # use the white-grid style if seaborn is installed
        sns.set_style('whitegrid')
    except ImportError:
        pass
    pCapital = plt.subplot(4, 1, 1)
    pCapital.set_ylabel("capital")
    pCapital.plot(d['capitalList'], color='r', lw=0.8)
    pDD = plt.subplot(4, 1, 2)
    pDD.set_ylabel("DD")
    pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
    pPnl = plt.subplot(4, 1, 3)
    pPnl.set_ylabel("pnl")
    pPnl.hist(d['pnlList'], bins=50, color='c')
    pPos = plt.subplot(4, 1, 4)
    pPos.set_ylabel("Position")
    if d['posList'][-1] == 0:
        del d['posList'][-1]
    tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
    # BUG FIX: np.int was removed from NumPy (1.24+); also guard against a
    # zero step (np.arange error) when there are fewer than 10 timestamps
    step = max(1, int(len(tradeTimeIndex) / 10))
    xindex = np.arange(0, len(tradeTimeIndex), step)
    # list comprehension instead of map() so labels are materialized on Python 3 too
    tradeTimeIndex = [tradeTimeIndex[i] for i in xindex]
    pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
    pPos.set_ylim(-1.2, 1.2)
    plt.sca(pPos)
    plt.tight_layout()
    plt.xticks(xindex, tradeTimeIndex, rotation=30)  # rotate the time labels
    plt.show()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
    """Strategy-update GUI event; meaningless in a backtest, so do nothing."""
    pass
#----------------------------------------------------------------------
def setSlippage(self, slippage):
    """Set the assumed slippage per trade (in price units)."""
    self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
    """Set the contract multiplier."""
    self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
    """Set the commission rate (as a fraction of turnover)."""
    self.rate = rate
#----------------------------------------------------------------------
def setPriceTick(self, priceTick):
    """Set the minimum price increment used to round order prices."""
    self.priceTick = priceTick
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
    """Run a single-process brute-force parameter optimization.

    Returns the list of ([setting-string], target-value) tuples sorted by
    the target value, best first.
    """
    # expand the optimization settings into concrete parameter dicts
    settingList = optimizationSetting.generateSetting()
    targetName = optimizationSetting.optimizeTarget
    # sanity-check the configuration
    if not settingList or not targetName:
        self.output(u'优化设置有问题,请检查')
        # BUG FIX: the original fell through and ran a pointless loop
        return []
    # evaluate each parameter combination on a freshly reset engine
    resultList = []
    for setting in settingList:
        self.clearBacktestingResult()
        self.output('-' * 30)
        self.output('setting: %s' %str(setting))
        self.initStrategy(strategyClass, setting)
        self.runBacktesting()
        d = self.calculateBacktestingResult()
        try:
            targetValue = d[targetName]
        except KeyError:
            # no trades were produced, so the target field is missing
            targetValue = 0
        resultList.append(([str(setting)], targetValue))
    # report, best first
    resultList.sort(reverse=True, key=lambda result: result[1])
    self.output('-' * 30)
    self.output(u'优化结果:')
    for result in resultList:
        self.output(u'%s: %s' %(result[0], result[1]))
    # BUG FIX: the original returned the loop variable `result` (the last,
    # i.e. worst, entry) instead of the full sorted list
    return resultList
#----------------------------------------------------------------------
def clearBacktestingResult(self):
    """Reset all order/trade state so the engine can run another backtest."""
    # empty every order/trade dictionary in one pass
    for container in (self.limitOrderDict, self.workingLimitOrderDict,
                      self.stopOrderDict, self.workingStopOrderDict,
                      self.tradeDict):
        container.clear()
    # restart all ID counters
    self.limitOrderCount = 0
    self.stopOrderCount = 0
    self.tradeCount = 0
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
    """Run the brute-force optimization across a CPU-core-sized process pool.

    Returns the list of (setting-string, target-value) tuples sorted best
    first, matching runOptimization.
    """
    # expand the optimization settings
    settingList = optimizationSetting.generateSetting()
    targetName = optimizationSetting.optimizeTarget
    # sanity-check the configuration
    if not settingList or not targetName:
        self.output(u'优化设置有问题,请检查')
        # BUG FIX: the original kept going and spawned a useless pool
        return []
    # one worker process per CPU core
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    l = []
    for setting in settingList:
        l.append(pool.apply_async(optimize, (strategyClass, setting,
                                             targetName, self.mode,
                                             self.startDate, self.initDays, self.endDate,
                                             self.slippage, self.rate, self.size,
                                             self.dbName, self.symbol)))
    pool.close()
    pool.join()
    # collect and report, best first
    resultList = [res.get() for res in l]
    resultList.sort(reverse=True, key=lambda result: result[1])
    self.output('-' * 30)
    self.output(u'优化结果:')
    for result in resultList:
        self.output(u'%s: %s' %(result[0], result[1]))
    # return the results so callers can use them (the original returned None,
    # inconsistent with runOptimization)
    return resultList
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
    """Round a price to the nearest multiple of the contract's price tick."""
    if not self.priceTick:
        # tick not configured (0): leave the price untouched
        return price
    return round(price / self.priceTick, 0) * self.priceTick
########################################################################
class TradingResult(object):
    """Profit-and-loss record for one closed round-trip trade."""
    #----------------------------------------------------------------------
    def __init__(self, entryPrice, entryDt, exitPrice,
                 exitDt, volume, rate, slippage, size):
        """Compute turnover, costs and net P&L for a closed round trip.

        volume is signed: positive when a long was closed, negative when a
        short was closed. rate is the commission fraction, slippage the
        per-side slippage in price units, size the contract multiplier.
        """
        self.entryPrice = entryPrice  # entry fill price
        self.exitPrice = exitPrice    # exit fill price
        self.entryDt = entryDt        # entry timestamp (datetime)
        self.exitDt = exitDt          # exit timestamp
        self.volume = volume          # signed traded volume
        traded = abs(volume)
        self.turnover = (entryPrice + exitPrice) * size * traded
        self.commission = self.turnover * rate
        self.slippage = slippage * 2 * size * traded  # paid on entry and exit
        self.pnl = ((exitPrice - entryPrice) * volume * size
                    - self.commission - self.slippage)
########################################################################
class OptimizationSetting(object):
    """Parameter-space definition for a brute-force optimization run."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.paramDict = OrderedDict()  # parameter name -> list of candidate values
        self.optimizeTarget = ''        # backtest-result field used for ranking
    #----------------------------------------------------------------------
    def addParameter(self, name, start, end=None, step=None):
        """Add a parameter to optimize.

        With only start given the parameter is fixed at that single value;
        otherwise candidates are start, start+step, ... up to end inclusive.
        Invalid ranges are reported and ignored.
        """
        if end is None and step is None:
            self.paramDict[name] = [start]
            return
        if end < start:
            # print(...) with a single argument behaves the same on Python 2 and 3
            print(u'参数起始点必须不大于终止点')
            return
        # BUG FIX: with end given but step omitted, `step <= 0` raised a
        # TypeError on Python 3 (None comparison); treat missing step as invalid
        if step is None or step <= 0:
            # BUG FIX: typo in the original message ('布进' -> '步进')
            print(u'参数步进必须大于0')
            return
        l = []
        param = start
        while param <= end:
            l.append(param)
            param += step
        self.paramDict[name] = l
    #----------------------------------------------------------------------
    def generateSetting(self):
        """Return the cartesian product of all parameters as a list of dicts."""
        nameList = self.paramDict.keys()
        paramList = self.paramDict.values()
        # itertools.product expands every parameter combination
        productList = list(product(*paramList))
        settingList = []
        for p in productList:
            d = dict(zip(nameList, p))
            settingList.append(d)
        return settingList
    #----------------------------------------------------------------------
    def setOptimizeTarget(self, target):
        """Choose which backtest-result field ranks the settings."""
        self.optimizeTarget = target
#----------------------------------------------------------------------
def formatNumber(n):
    """Render a number rounded to two decimals with thousands separators."""
    return format(round(n, 2), ',')
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
mode, startDate, initDays, endDate,
slippage, rate, size,
dbName, symbol):
"""Worker function run in each child process of the parallel optimization.

Builds a fresh engine, runs one backtest with the given setting and
returns (setting-as-string, target-value).
"""
engine = BacktestingEngine()
engine.setBacktestingMode(mode)
engine.setStartDate(startDate, initDays)
engine.setEndDate(endDate)
engine.setSlippage(slippage)
engine.setRate(rate)
engine.setSize(size)
engine.setDatabase(dbName, symbol)
engine.initStrategy(strategyClass, setting)
engine.runBacktesting()
d = engine.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
# the backtest produced no trades, so the target field is missing
targetValue = 0
return (str(setting), targetValue)
'''2017051200 Modify by hetajen begin'''
from strategy.strategyAtrRsi import AtrRsiStrategy
# Build a backtesting engine preconfigured for the IF0000 index-future data.
def getEngine():
engine = BacktestingEngine()
engine.setBacktestingMode(engine.BAR_MODE) # replay bar (K-line) data
engine.setStartDate('20120101') # first date of the replay data
engine.setSlippage(0.2) # one tick of the index future
engine.setRate(0.3 / 10000) # commission: 0.3 per ten thousand
engine.setSize(300) # index-future contract multiplier
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
return engine
# type=0 returns a plain strategy-parameter dict, otherwise an OptimizationSetting.
# NOTE: the parameter name 'type' shadows the builtin
def getParam(type=0):
if type == 0:
setting = {'atrLength': 11}
else:
setting = OptimizationSetting() # create a new optimization task setting
setting.setOptimizeTarget('capital') # rank settings by net profit
setting.addParameter('atrLength', 12, 20, 2) # first optimized parameter: atrLength
setting.addParameter('atrMa', 20, 30, 5) # second optimized parameter: atrMa
setting.addParameter('rsiLength', 5) # fixed-value parameter
return setting
# Run a single backtest and display its results.
def xh_backTesting():
engine = getEngine()
setting = getParam()
engine.initStrategy(AtrRsiStrategy, setting)
engine.runBacktesting() # run the backtest
engine.showBacktestingResult() # display the results
# Run the brute-force parameter optimization.
def xh_optimize():
engine = getEngine()
setting = getParam(1)
engine.runOptimization(AtrRsiStrategy, setting) # single-process optimization
#engine.runParallelOptimization(AtrRsiStrategy, setting) # multi-process optimization
if __name__ == '__main__':
start = time.time()
xh_backTesting()
xh_optimize()
print u'耗时:%s' % (time.time() - start) # crude wall-clock timing
'''2017051200 Modify by hetajen end'''
| mit |
seanwestfall/django | tests/m2m_through_regress/tests.py | 182 | 9847 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
# Regression tests for ManyToMany relations declared with an explicit
# intermediate ("through") model, including one whose intermediate model
# uses a custom id column.
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "frank@example.com", "password")
cls.jane = User.objects.create_user("jane", "jane@example.com", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
# direct assignment and .create() are unavailable on m2m relations that
# go through an explicit intermediate model
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.bob, "group_set", [])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.roll, "members", [])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.rock.members.create, name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
# Serialization regression: a through-model m2m must be dumped via the
# intermediate model's rows, not as an m2m field on the endpoints.
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
# JSON dump: the membership appears as its own object with FK fields
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)
# XML dump: same structure, serialized through the membership object
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml",
indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
# Regression tests for m2m-through relations whose FKs use to_field
# (non-pk target columns).
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
# adding via a null to_field value on either side must raise ValueError
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
    """Fixture-backed regression test for sequence creation. Refs #11107."""
    fixtures = ["m2m_through"]
    def test_sequence_creation(self):
        """Sequences on an m2m_through must be created for the through model,
        not for a phantom auto-generated m2m table. Refs #11107."""
        output = StringIO()
        management.call_command(
            "dumpdata", "m2m_through_regress", format="json", stdout=output)
        self.assertJSONEqual(
            output.getvalue().strip(),
            """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""",
        )
| bsd-3-clause |
googleapis/python-dialogflow-cx | google/cloud/dialogflowcx_v3beta1/services/flows/transports/grpc_asyncio.py | 1 | 23309 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import flow
from google.cloud.dialogflowcx_v3beta1.types import flow as gcdc_flow
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import FlowsTransport, DEFAULT_CLIENT_INFO
from .grpc import FlowsGrpcTransport
class FlowsGrpcAsyncIOTransport(FlowsTransport):
    """gRPC AsyncIO backend transport for Flows.
    Service for managing
    [Flows][google.cloud.dialogflow.cx.v3beta1.Flow].
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    _grpc_channel: aio.Channel
    # Cache of lazily-created stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            # NOTE(review): False (not None) — presumably tells the base
            # transport to skip default-credential lookup; confirm in
            # FlowsTransport.__init__.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def create_flow(
        self,
    ) -> Callable[[gcdc_flow.CreateFlowRequest], Awaitable[gcdc_flow.Flow]]:
        r"""Return a callable for the create flow method over gRPC.
        Creates a flow in the specified agent.
        Note: You should always train a flow prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
        Returns:
            Callable[[~.CreateFlowRequest],
                    Awaitable[~.Flow]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_flow" not in self._stubs:
            self._stubs["create_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/CreateFlow",
                request_serializer=gcdc_flow.CreateFlowRequest.serialize,
                response_deserializer=gcdc_flow.Flow.deserialize,
            )
        return self._stubs["create_flow"]
    @property
    def delete_flow(
        self,
    ) -> Callable[[flow.DeleteFlowRequest], Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete flow method over gRPC.
        Deletes a specified flow.
        Returns:
            Callable[[~.DeleteFlowRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_flow" not in self._stubs:
            self._stubs["delete_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/DeleteFlow",
                request_serializer=flow.DeleteFlowRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_flow"]
    @property
    def list_flows(
        self,
    ) -> Callable[[flow.ListFlowsRequest], Awaitable[flow.ListFlowsResponse]]:
        r"""Return a callable for the list flows method over gRPC.
        Returns the list of all flows in the specified agent.
        Returns:
            Callable[[~.ListFlowsRequest],
                    Awaitable[~.ListFlowsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_flows" not in self._stubs:
            self._stubs["list_flows"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/ListFlows",
                request_serializer=flow.ListFlowsRequest.serialize,
                response_deserializer=flow.ListFlowsResponse.deserialize,
            )
        return self._stubs["list_flows"]
    @property
    def get_flow(self) -> Callable[[flow.GetFlowRequest], Awaitable[flow.Flow]]:
        r"""Return a callable for the get flow method over gRPC.
        Retrieves the specified flow.
        Returns:
            Callable[[~.GetFlowRequest],
                    Awaitable[~.Flow]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_flow" not in self._stubs:
            self._stubs["get_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/GetFlow",
                request_serializer=flow.GetFlowRequest.serialize,
                response_deserializer=flow.Flow.deserialize,
            )
        return self._stubs["get_flow"]
    @property
    def update_flow(
        self,
    ) -> Callable[[gcdc_flow.UpdateFlowRequest], Awaitable[gcdc_flow.Flow]]:
        r"""Return a callable for the update flow method over gRPC.
        Updates the specified flow.
        Note: You should always train a flow prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
        Returns:
            Callable[[~.UpdateFlowRequest],
                    Awaitable[~.Flow]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_flow" not in self._stubs:
            self._stubs["update_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/UpdateFlow",
                request_serializer=gcdc_flow.UpdateFlowRequest.serialize,
                response_deserializer=gcdc_flow.Flow.deserialize,
            )
        return self._stubs["update_flow"]
    @property
    def train_flow(
        self,
    ) -> Callable[[flow.TrainFlowRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the train flow method over gRPC.
        Trains the specified flow. Note that only the flow in 'draft'
        environment is trained.
        Note: You should always train a flow prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
        Returns:
            Callable[[~.TrainFlowRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "train_flow" not in self._stubs:
            self._stubs["train_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/TrainFlow",
                request_serializer=flow.TrainFlowRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["train_flow"]
    @property
    def validate_flow(
        self,
    ) -> Callable[[flow.ValidateFlowRequest], Awaitable[flow.FlowValidationResult]]:
        r"""Return a callable for the validate flow method over gRPC.
        Validates the specified flow and creates or updates
        validation results. Please call this API after the
        training is completed to get the complete validation
        results.
        Returns:
            Callable[[~.ValidateFlowRequest],
                    Awaitable[~.FlowValidationResult]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "validate_flow" not in self._stubs:
            self._stubs["validate_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/ValidateFlow",
                request_serializer=flow.ValidateFlowRequest.serialize,
                response_deserializer=flow.FlowValidationResult.deserialize,
            )
        return self._stubs["validate_flow"]
    @property
    def get_flow_validation_result(
        self,
    ) -> Callable[
        [flow.GetFlowValidationResultRequest], Awaitable[flow.FlowValidationResult]
    ]:
        r"""Return a callable for the get flow validation result method over gRPC.
        Gets the latest flow validation result. Flow
        validation is performed when ValidateFlow is called.
        Returns:
            Callable[[~.GetFlowValidationResultRequest],
                    Awaitable[~.FlowValidationResult]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_flow_validation_result" not in self._stubs:
            self._stubs["get_flow_validation_result"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/GetFlowValidationResult",
                request_serializer=flow.GetFlowValidationResultRequest.serialize,
                response_deserializer=flow.FlowValidationResult.deserialize,
            )
        return self._stubs["get_flow_validation_result"]
    @property
    def import_flow(
        self,
    ) -> Callable[[flow.ImportFlowRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the import flow method over gRPC.
        Imports the specified flow to the specified agent from a binary
        file.
        Note: You should always train a flow prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
        Returns:
            Callable[[~.ImportFlowRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "import_flow" not in self._stubs:
            self._stubs["import_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/ImportFlow",
                request_serializer=flow.ImportFlowRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["import_flow"]
    @property
    def export_flow(
        self,
    ) -> Callable[[flow.ExportFlowRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the export flow method over gRPC.
        Exports the specified flow to a binary file.
        Note that resources (e.g. intents, entities, webhooks)
        that the flow references will also be exported.
        Returns:
            Callable[[~.ExportFlowRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "export_flow" not in self._stubs:
            self._stubs["export_flow"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Flows/ExportFlow",
                request_serializer=flow.ExportFlowRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["export_flow"]
# The async transport is this module's only public export.
__all__ = ("FlowsGrpcAsyncIOTransport",)
| apache-2.0 |
botswana-harvard/tshilo-dikotla | td_maternal/forms/maternal_contact_form.py | 1 | 1974 | from django import forms
from edc_constants.constants import YES
from ..models import MaternalContact, MaternalLocator
class MaternalContactForm(forms.ModelForm):
    """Form for maternal contact records.

    Cross-field validation enforces the consent recorded on the Maternal
    Locator before a call/SMS contact may be logged, and requires an
    outcome comment whenever the contact succeeded.
    """

    def clean(self):
        cleaned_data = super(MaternalContactForm, self).clean()
        self.validate_maternal_locator()
        self.validate_contact_success()
        return cleaned_data

    def validate_maternal_locator(self):
        """Raise ValidationError unless a locator exists and it permits
        the chosen contact type (voice call or SMS)."""
        cleaned_data = self.cleaned_data
        registered_subject = cleaned_data.get('registered_subject')
        if not registered_subject:
            # Field-level validation already reported the missing subject;
            # returning here avoids an AttributeError on None below, which
            # would surface as a server error instead of a form error.
            return
        subject_identifier = registered_subject.subject_identifier
        try:
            locator = MaternalLocator.objects.get(
                registered_subject__subject_identifier=subject_identifier)
        except MaternalLocator.DoesNotExist:
            raise forms.ValidationError(
                'Please complete the Locator form before adding contact record.')
        else:
            if cleaned_data.get('contact_type') == 'voice_call':
                if locator.may_follow_up != YES:
                    raise forms.ValidationError(
                        'Maternal Locator says may_follow_up: {}, you cannot call '
                        'participant if they did not give permission.'.format(locator.may_follow_up))
            if cleaned_data.get('contact_type') == 'text_message':
                if locator.may_sms_follow_up != YES:
                    raise forms.ValidationError(
                        'Maternal Locator says may_sms_follow_up: {}, you cannot sms '
                        'participant if they did not give permission.'.format(locator.may_sms_follow_up))

    def validate_contact_success(self):
        """Require an outcome comment for every successful contact."""
        cleaned_data = self.cleaned_data
        if cleaned_data.get('contact_success') == YES:
            if not cleaned_data.get('contact_comment'):
                raise forms.ValidationError(
                    'Please give the outcome of the contact with the participant.')

    class Meta:
        model = MaternalContact
        fields = '__all__'
| gpl-2.0 |
google/eclipse2017 | scripts/obtain_token.py | 1 | 2613 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import argparse
import oauth2client
import urllib
import httplib
from oauth2client import tools
from oauth2client import client
from oauth2client import service_account
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
import json
scopes = ['email', 'profile']

parser = argparse.ArgumentParser(parents=[tools.argparser])
flags = parser.parse_args()

# Application Default Credentials (a service account key) supply the
# signer used to mint the JWT assertion below.
credentials = client.GoogleCredentials.get_application_default()

now = int(time.time())
payload = {
    'iat': now,
    'exp': now + credentials.MAX_TOKEN_LIFETIME_SECS,
    'aud': 'https://www.googleapis.com/oauth2/v4/token',
    'iss': 'https://accounts.google.com',
    'scope': 'profile'
}

signed_jwt = oauth2client.crypt.make_signed_jwt(
    credentials._signer, payload, key_id=credentials._private_key_id)

# Exchange the signed JWT for an access token (JWT bearer grant).
params = urllib.urlencode({
    'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    'assertion': signed_jwt})
headers = {"Content-Type": "application/x-www-form-urlencoded"}

conn = httplib.HTTPSConnection("www.googleapis.com")
conn.request("POST", "/oauth2/v4/token", params, headers)
# Read the response exactly once: httplib allows a single getresponse()
# per request, so calling it a second time (as the original code did)
# cannot return the body again.
body = conn.getresponse().read()
print(body)
response = json.loads(body)
print(response)
seishei/multiprocess | py3.2/multiprocess/dummy/__init__.py | 2 | 4590 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Public API of this thread-backed stand-in for the multiprocessing package.
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
    ]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocess import TimeoutError, cpu_count
from multiprocess.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
    """Thread that mimics the ``multiprocessing.Process`` interface."""
    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()
    def start(self):
        # Only the "process" that created this one may start it.
        assert self._parent is current_process()
        self._start_called = True
        parent = self._parent
        if hasattr(parent, '_children'):
            # Register with the parent so active_children() can find us;
            # the WeakKeyDictionary value is unused.
            parent._children[self] = None
        threading.Thread.start(self)
    @property
    def exitcode(self):
        """0 once the thread was started and has finished, else None."""
        return 0 if self._start_called and not self.is_alive() else None
#
#
#
class Condition(threading._Condition):
    # XXX
    # Compatibility shim: on Python 2, Condition methods are unbound methods
    # and must be unwrapped via __func__ before being reassigned; on Python 3
    # the plain function object can be aliased directly.
    # NOTE(review): threading._Condition exists only on older interpreters
    # (it became the public threading.Condition class in 3.3) — confirm the
    # Python versions this py3.2 backport still targets.
    if sys.version_info < (3, 0):
        notify_all = threading._Condition.notify_all.__func__
    else:
        notify_all = threading._Condition.notify_all
#
#
#
# In this thread-backed emulation a "process" is just a thread.
Process = DummyProcess
current_process = threading.current_thread
# Give the main thread a children registry up front so active_children()
# works even before any DummyProcess has been created.
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
    """Return the children of the current "process" that are still alive,
    pruning finished ones from the registry as a side effect."""
    children = current_process()._children
    finished = [child for child in list(children) if not child.is_alive()]
    for child in finished:
        children.pop(child, None)
    return list(children)
def freeze_support():
    """No-op: frozen-executable support only matters for real processes."""
    pass
#
#
#
class Namespace(object):
    """Simple attribute bag; repr lists non-underscore attributes sorted."""
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        shown = sorted('%s=%r' % (key, val)
                       for key, val in self.__dict__.items()
                       if not key.startswith('_'))
        return 'Namespace(%s)' % ', '.join(shown)
    dict = dict
    list = list
def Array(typecode, sequence, lock=True):
    """Return a plain ``array.array``; *lock* is accepted only for API
    compatibility with multiprocessing and is ignored here."""
    return array.array(typecode, sequence)
class Value(object):
    """Mutable boxed value mimicking ``multiprocessing.Value``.

    *lock* is accepted only for API compatibility and is ignored.
    """
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, value):
        self._value = value
    def __repr__(self):
        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
    """Return this module itself, which exposes the manager API directly."""
    return sys.modules[__name__]
def shutdown():
    """No-op: there is no manager process to shut down in the thread backend."""
    pass
def Pool(processes=None, initializer=None, initargs=()):
    # Thread-backed pool with the multiprocessing.Pool signature.
    # NOTE(review): the import is deferred to call time — presumably to
    # avoid a circular import with multiprocess.pool at module load; confirm.
    from multiprocess.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue  # queue.Queue already provides task_done()/join()
| bsd-3-clause |
TiddlySpace/tiddlyspace | test/test_web_htmljavascript.py | 1 | 2427 | import pytest
from test.fixtures import make_test_env, make_fake_space, get_auth
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
import httplib2
import simplejson
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.web.util import encode_name
def setup_module(module):
    """Start an intercepted TiddlySpace app and seed one page.

    NOTE(review): ``store`` and ``app_fn`` are module globals — presumably
    injected into this module by ``make_test_env``; confirm in fixtures.
    """
    make_test_env(module)
    httplib2_intercept.install()
    wsgi_intercept.add_wsgi_intercept('thing.0.0.0.0', 8080, app_fn)
    # Public space "thing", served at the subdomain the tests request.
    make_fake_space(store, 'thing')
    module.http = httplib2.Http()
    # Seed the wikitext page that every test below renders.
    tiddler = Tiddler('OhHi', 'thing_public')
    tiddler.text = '!Hi\n'
    store.put(tiddler)
def test_nohtmljavascript():
    """Without an HtmlJavascript tiddler, only the stock scripts are linked."""
    response, content = http.request('http://thing.0.0.0.0:8080/OhHi')
    assert response['status'] == '200'
    assert 'OhHi' in content
    assert '<script src="/bags/common/tiddlers/jquery.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/_reply-loader.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/backstage.js"></script>' in content
    # Custom script tags must not appear before HtmlJavascript exists.
    assert 'src="/foo.js"' not in content
    assert 'src="/bar.js"' not in content
def test_htmljavascript_adds():
    """An HtmlJavascript tiddler (one URI per line) adds <script> tags.

    NOTE(review): these tests appear order-dependent — the tiddler stored
    here is overwritten by the following test; confirm pytest runs them
    in definition order for this suite.
    """
    tiddler = Tiddler('HtmlJavascript', 'thing_public')
    tiddler.text = '/foo.js\n/bar.js'
    store.put(tiddler)
    response, content = http.request('http://thing.0.0.0.0:8080/OhHi')
    assert response['status'] == '200'
    assert 'OhHi' in content
    # Stock scripts remain...
    assert '<script src="/bags/common/tiddlers/jquery.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/_reply-loader.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/backstage.js"></script>' in content
    # ...and both configured URIs are now linked.
    assert 'src="/foo.js"></script>' in content
    assert 'src="/bar.js"></script>' in content
def test_empty_htmljavascript_no_add():
    """A whitespace-only HtmlJavascript tiddler adds no script tags at all
    (in particular, no empty src="" tag for blank lines)."""
    tiddler = Tiddler('HtmlJavascript', 'thing_public')
    tiddler.text = ' \n\n'
    store.put(tiddler)
    response, content = http.request('http://thing.0.0.0.0:8080/OhHi')
    assert response['status'] == '200'
    assert 'OhHi' in content
    # Stock scripts are unaffected.
    assert '<script src="/bags/common/tiddlers/jquery.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/_reply-loader.js"></script>' in content
    assert '<script src="/bags/common/tiddlers/backstage.js"></script>' in content
    assert 'src="/foo.js"></script>' not in content
    assert 'src="/bar.js"></script>' not in content
    assert 'src=""></script>' not in content
| bsd-3-clause |
anntzer/numpy | numpy/polynomial/_polybase.py | 6 | 36398 | """
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
import os
import abc
import numbers
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(abc.ABC):
"""An abstract base class for immutable series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
.. versionadded:: 1.9.0
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
``P_i`` is the basis polynomials of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
# Not hashable
__hash__ = None
# Opt out of numpy ufuncs and Python ops with ndarray subclasses.
__array_ufunc__ = None
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
# Unicode character mappings for improved __str__
_superscript_mapping = str.maketrans({
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹"
})
_subscript_mapping = str.maketrans({
"0": "₀",
"1": "₁",
"2": "₂",
"3": "₃",
"4": "₄",
"5": "₅",
"6": "₆",
"7": "₇",
"8": "₈",
"9": "₉"
})
# Some fonts don't support full unicode character ranges necessary for
# the full set of superscripts and subscripts, including common/default
# fonts in Windows shells/terminals. Therefore, default to ascii-only
# printing on windows.
_use_unicode = not os.name == 'nt'
@property
@abc.abstractmethod
def domain(self):
pass
@property
@abc.abstractmethod
def window(self):
pass
@property
@abc.abstractmethod
def basis_name(self):
pass
@staticmethod
@abc.abstractmethod
def _add(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _sub(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _mul(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _div(c1, c2):
pass
@staticmethod
@abc.abstractmethod
def _pow(c, pow, maxpower=None):
pass
@staticmethod
@abc.abstractmethod
def _val(x, c):
pass
@staticmethod
@abc.abstractmethod
def _int(c, m, k, lbnd, scl):
pass
@staticmethod
@abc.abstractmethod
def _der(c, m, scl):
pass
@staticmethod
@abc.abstractmethod
def _fit(x, y, deg, rcond, full):
pass
@staticmethod
@abc.abstractmethod
def _line(off, scl):
pass
@staticmethod
@abc.abstractmethod
def _roots(c):
pass
@staticmethod
@abc.abstractmethod
def _fromroots(r):
pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
"""Check if domains match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
.. versionadded:: 1.7.0
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
The coefficients of`other` if it is a compatible instance,
of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
def __repr__(self):
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return f"{name}({coef}, domain={domain}, window={window})"
def __format__(self, fmt_str):
if fmt_str == '':
return self.__str__()
if fmt_str not in ('ascii', 'unicode'):
raise ValueError(
f"Unsupported format string '{fmt_str}' passed to "
f"{self.__class__}.__format__. Valid options are "
f"'ascii' and 'unicode'"
)
if fmt_str == 'ascii':
return self._generate_string(self._str_term_ascii)
return self._generate_string(self._str_term_unicode)
def __str__(self):
if self._use_unicode:
return self._generate_string(self._str_term_unicode)
return self._generate_string(self._str_term_ascii)
    def _generate_string(self, term_method):
        """
        Generate the full string representation of the polynomial, using
        ``term_method`` to generate each polynomial term.

        Parameters
        ----------
        term_method : callable
            Called as ``term_method(power, "x")`` with ``power`` a string;
            returns the text for a single basis term (e.g. `_str_term_ascii`
            or `_str_term_unicode`).

        Returns
        -------
        str
            Human-readable expansion, wrapped to the numpy print-option
            ``linewidth``.
        """
        # Get configuration for line breaks
        linewidth = np.get_printoptions().get('linewidth', 75)
        if linewidth < 1:
            linewidth = 1
        # The constant term is emitted bare, without a basis function.
        out = f"{self.coef[0]}"
        for i, coef in enumerate(self.coef[1:]):
            out += " "
            power = str(i + 1)
            # Polynomial coefficient
            # The coefficient array can be an object array with elements that
            # will raise a TypeError with >= 0 (e.g. strings or Python
            # complex). In this case, represent the coefficient as-is.
            try:
                if coef >= 0:
                    next_term = f"+ {coef}"
                else:
                    next_term = f"- {-coef}"
            except TypeError:
                next_term = f"+ {coef}"
            # Polynomial term
            next_term += term_method(power, "x")
            # Length of the current line with next term added
            line_len = len(out.split('\n')[-1]) + len(next_term)
            # If not the last term in the polynomial, it will be two
            # characters longer due to the +/- with the next term
            if i < len(self.coef[1:]) - 1:
                line_len += 2
            # Handle linebreaking
            if line_len >= linewidth:
                next_term = next_term.replace(" ", "\n", 1)
            out += next_term
        return out
@classmethod
def _str_term_unicode(cls, i, arg_str):
"""
String representation of single polynomial term using unicode
characters for superscripts and subscripts.
"""
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis_name, or override "
"_str_term_unicode(cls, i, arg_str)"
)
return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}"
f"({arg_str})")
@classmethod
def _str_term_ascii(cls, i, arg_str):
"""
String representation of a single polynomial term using ** and _ to
represent superscripts and subscripts, respectively.
"""
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis_name, or override "
"_str_term_ascii(cls, i, arg_str)"
)
return f" {cls.basis_name}_{i}({arg_str})"
@classmethod
def _repr_latex_term(cls, i, arg_str, needs_parens):
if cls.basis_name is None:
raise NotImplementedError(
"Subclasses must define either a basis name, or override "
"_repr_latex_term(i, arg_str, needs_parens)")
# since we always add parens, we don't care if the expression needs them
return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})"
@staticmethod
def _repr_latex_scalar(x):
# TODO: we're stuck with disabling math formatting until we handle
# exponents in this function
return r'\text{{{}}}'.format(x)
def _repr_latex_(self):
# get the scaled argument string to the basis functions
off, scale = self.mapparms()
if off == 0 and scale == 1:
term = 'x'
needs_parens = False
elif scale == 1:
term = f"{self._repr_latex_scalar(off)} + x"
needs_parens = True
elif off == 0:
term = f"{self._repr_latex_scalar(scale)}x"
needs_parens = True
else:
term = (
f"{self._repr_latex_scalar(off)} + "
f"{self._repr_latex_scalar(scale)}x"
)
needs_parens = True
mute = r"\color{{LightGray}}{{{}}}".format
parts = []
for i, c in enumerate(self.coef):
# prevent duplication of + and - signs
if i == 0:
coef_str = f"{self._repr_latex_scalar(c)}"
elif not isinstance(c, numbers.Real):
coef_str = f" + ({self._repr_latex_scalar(c)})"
elif not np.signbit(c):
coef_str = f" + {self._repr_latex_scalar(c)}"
else:
coef_str = f" - {self._repr_latex_scalar(-c)}"
# produce the string for the term
term_str = self._repr_latex_term(i, term, needs_parens)
if term_str == '1':
part = coef_str
else:
part = rf"{coef_str}\,{term_str}"
if c == 0:
part = mute(part)
parts.append(part)
if parts:
body = ''.join(parts)
else:
# in case somehow there are no coefficients at all
body = '0'
return rf"$x \mapsto {body}$"
# Pickle and copy
def __getstate__(self):
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
ret['window'] = self.window.copy()
return ret
def __setstate__(self, dict):
self.__dict__ = dict
# Call
def __call__(self, arg):
off, scl = pu.mapparms(self.domain, self.window)
arg = off + scl*arg
return self._val(arg, self.coef)
def __iter__(self):
return iter(self.coef)
def __len__(self):
return len(self.coef)
# Numeric properties.
def __neg__(self):
return self.__class__(-self.coef, self.domain, self.window)
def __pos__(self):
return self
def __add__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._add(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._sub(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
othercoef = self._get_coefficients(other)
try:
coef = self._mul(self.coef, othercoef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, numbers.Number) or isinstance(other, bool):
raise TypeError(
f"unsupported types for true division: "
f"'{type(self)}', '{type(other)}'"
)
return self.__floordiv__(other)
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
def __divmod__(self, other):
othercoef = self._get_coefficients(other)
try:
quo, rem = self._div(self.coef, othercoef)
except ZeroDivisionError:
raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window)
return res
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError:
raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
np.all(self.window == other.window) and
(self.coef.shape == other.coef.shape) and
np.all(self.coef == other.coef))
return res
    def __ne__(self, other):
        # Explicit inverse of the custom __eq__; these classes also set
        # __hash__ = None, so default identity-based comparison never applies.
        return not self.__eq__(other)
#
# Extra methods.
#
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window)
def degree(self):
"""The degree of the series.
.. versionadded:: 1.5.0
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
.. versionadded:: 1.5.0
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
absolute value greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
New instance of series with trimmed coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
numerically ill defined series.
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window))
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
"""Differentiate.
Return a series instance of that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window)
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
roots decrease the further outside the domain they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,)
y-coordinates of the M sample points ``(x[i], y[i])``.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
value is the default class domain
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain and window specified in the call. If the
coefficients for the unscaled and unshifted basis polynomials are
of interest, do ``new_series.convert().coef``.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
@classmethod
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl*roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
@classmethod
def identity(cls, domain=None, window=None):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
Series of representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window)
@classmethod
def basis(cls, deg, domain=None, window=None):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
.. versionadded:: 1.7.0
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
series of one of the types supported by by the numpy.polynomial
module, but could be some other class that supports the convert
method.
.. versionadded:: 1.7.0
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
| bsd-3-clause |
sgiavasis/nipype | nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py | 12 | 1637 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..utilities import HistogramMatchingFilter
def test_HistogramMatchingFilter_inputs():
    """Check the metadata of every declared input trait of
    HistogramMatchingFilter (argstr/CLI mapping, hashing and default flags).

    NOTE: this module is auto-generated by tools/checkspecs.py; a mismatch
    here usually means the interface spec changed without regenerating.
    """
    # Expected trait metadata, keyed by input name.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    histogramAlgorithm=dict(argstr='--histogramAlgorithm %s',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputBinaryVolume=dict(argstr='--inputBinaryVolume %s',
    ),
    inputVolume=dict(argstr='--inputVolume %s',
    ),
    numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d',
    ),
    numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d',
    ),
    outputVolume=dict(argstr='--outputVolume %s',
    hash_files=False,
    ),
    referenceBinaryVolume=dict(argstr='--referenceBinaryVolume %s',
    ),
    referenceVolume=dict(argstr='--referenceVolume %s',
    ),
    terminal_output=dict(nohash=True,
    ),
    verbose=dict(argstr='--verbose ',
    ),
    writeHistogram=dict(argstr='--writeHistogram %s',
    ),
    )
    inputs = HistogramMatchingFilter.input_spec()

    # nose-style generator test: yields one assertion per (trait, metakey).
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_HistogramMatchingFilter_outputs():
    """Check the metadata of every declared output trait of
    HistogramMatchingFilter (auto-generated companion to the inputs test).
    """
    # outputVolume currently carries no extra metadata.
    output_map = dict(outputVolume=dict(),
    )
    outputs = HistogramMatchingFilter.output_spec()

    # nose-style generator test: yields one assertion per (trait, metakey).
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
KenV99/service.kodi.callbacks | resources/lib/watchdog/utils/delayed_queue.py | 23 | 2926 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
from collections import deque
class DelayedQueue(object):
    """Thread-safe FIFO queue whose items only become available for
    consumption ``delay`` seconds after insertion.

    ``get`` blocks until an element's delay has elapsed, or returns
    ``None`` once the queue has been closed. ``remove`` bypasses the
    delay and extracts the first matching element immediately.
    """

    def __init__(self, delay):
        self.delay = delay
        self._lock = threading.Lock()
        # The condition shares the queue lock, so put()/close() wake
        # consumers blocked in get().
        self._not_empty = threading.Condition(self._lock)
        self._queue = deque()
        self._closed = False

    def put(self, element):
        """Add element to queue, timestamping it for the delay check."""
        # `with` guarantees the lock is released even if append raises.
        with self._lock:
            self._queue.append((element, time.time()))
            self._not_empty.notify()

    def close(self):
        """Close queue, indicating no more items will be added."""
        # Set the flag under the lock so consumers checking _closed in
        # get() cannot miss the update, then interrupt any blocked wait().
        with self._not_empty:
            self._closed = True
            self._not_empty.notify()

    def get(self):
        """Remove and return an element from the queue, or return ``None``
        if the queue has been closed.
        """
        while True:
            # Wait until something is queued or the queue is closed.
            with self._not_empty:
                while not self._queue and not self._closed:
                    self._not_empty.wait()
                if self._closed:
                    return None
                head, insert_time = self._queue[0]
            # Sleep outside the lock until the element's delay elapses;
            # loop to guard against time.sleep returning early.
            time_left = insert_time + self.delay - time.time()
            while time_left > 0:
                time.sleep(time_left)
                time_left = insert_time + self.delay - time.time()
            # Return the element only if it is still at the front: it may
            # have been taken by remove() while we were sleeping.
            with self._lock:
                if self._queue and self._queue[0][0] is head:
                    self._queue.popleft()
                    return head

    def remove(self, predicate):
        """Remove and return the first item for which predicate is True,
        ignoring delay, or ``None`` if no item matches.
        """
        with self._lock:
            for i, (element, _) in enumerate(self._queue):
                if predicate(element):
                    del self._queue[i]
                    return element
        return None
| gpl-3.0 |
gurneyalex/odoo | addons/fleet/models/fleet_vehicle.py | 4 | 20652 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.osv import expression
class FleetVehicle(models.Model):
    """A fleet vehicle: identification, driver assignment, contracts, costs and logs."""
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _name = 'fleet.vehicle'
    _description = 'Vehicle'
    _order = 'license_plate asc, acquisition_date asc'
    def _get_default_state(self):
        # Default to the "Registered" state when that XML record still exists.
        state = self.env.ref('fleet.fleet_vehicle_state_registered', raise_if_not_found=False)
        return state if state and state.id else False
    name = fields.Char(compute="_compute_vehicle_name", store=True)
    active = fields.Boolean('Active', default=True, tracking=True)
    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company)
    currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
    license_plate = fields.Char(tracking=True,
        help='License plate number of the vehicle (i = plate number for a car)')
    vin_sn = fields.Char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False)
    driver_id = fields.Many2one('res.partner', 'Driver', tracking=True, help='Driver of the vehicle', copy=False)
    future_driver_id = fields.Many2one('res.partner', 'Future Driver', tracking=True, help='Next Driver of the vehicle', copy=False, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
    model_id = fields.Many2one('fleet.vehicle.model', 'Model',
        tracking=True, required=True, help='Model of the vehicle')
    manager_id = fields.Many2one('res.users', related='model_id.manager_id')
    brand_id = fields.Many2one('fleet.vehicle.model.brand', 'Brand', related="model_id.brand_id", store=True, readonly=False)
    log_drivers = fields.One2many('fleet.vehicle.assignation.log', 'vehicle_id', string='Assignation Logs')
    log_fuel = fields.One2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs')
    log_services = fields.One2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs')
    log_contracts = fields.One2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts')
    cost_count = fields.Integer(compute="_compute_count_all", string="Costs")
    contract_count = fields.Integer(compute="_compute_count_all", string='Contract Count')
    service_count = fields.Integer(compute="_compute_count_all", string='Services')
    fuel_logs_count = fields.Integer(compute="_compute_count_all", string='Fuel Log Count')
    odometer_count = fields.Integer(compute="_compute_count_all", string='Odometer')
    history_count = fields.Integer(compute="_compute_count_all", string="Drivers History Count")
    next_assignation_date = fields.Date('Assignation Date', help='This is the date at which the car will be available, if not set it means available instantly')
    acquisition_date = fields.Date('Immatriculation Date', required=False,
        default=fields.Date.today, help='Date when the vehicle has been immatriculated')
    first_contract_date = fields.Date(string="First Contract Date", default=fields.Date.today)
    color = fields.Char(help='Color of the vehicle')
    state_id = fields.Many2one('fleet.vehicle.state', 'State',
        default=_get_default_state, group_expand='_read_group_stage_ids',
        tracking=True,
        help='Current state of the vehicle', ondelete="set null")
    location = fields.Char(help='Location of the vehicle (garage, ...)')
    seats = fields.Integer('Seats Number', help='Number of seats of the vehicle')
    model_year = fields.Char('Model Year', help='Year of the model')
    doors = fields.Integer('Doors Number', help='Number of doors of the vehicle', default=5)
    tag_ids = fields.Many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id', 'tag_id', 'Tags', copy=False)
    odometer = fields.Float(compute='_get_odometer', inverse='_set_odometer', string='Last Odometer',
        help='Odometer measure of the vehicle at the moment of this log')
    odometer_unit = fields.Selection([
        ('kilometers', 'Kilometers'),
        ('miles', 'Miles')
        ], 'Odometer Unit', default='kilometers', help='Unit of the odometer ', required=True)
    transmission = fields.Selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle')
    fuel_type = fields.Selection([
        ('gasoline', 'Gasoline'),
        ('diesel', 'Diesel'),
        ('lpg', 'LPG'),
        ('electric', 'Electric'),
        ('hybrid', 'Hybrid')
        ], 'Fuel Type', help='Fuel Used by the vehicle')
    horsepower = fields.Integer()
    horsepower_tax = fields.Float('Horsepower Taxation')
    power = fields.Integer('Power', help='Power in kW of the vehicle')
    co2 = fields.Float('CO2 Emissions', help='CO2 emissions of the vehicle')
    image_128 = fields.Image(related='model_id.image_128', readonly=False)
    contract_renewal_due_soon = fields.Boolean(compute='_compute_contract_reminder', search='_search_contract_renewal_due_soon',
        string='Has Contracts to renew', multi='contract_info')
    contract_renewal_overdue = fields.Boolean(compute='_compute_contract_reminder', search='_search_get_overdue_contract_reminder',
        string='Has Contracts Overdue', multi='contract_info')
    contract_renewal_name = fields.Text(compute='_compute_contract_reminder', string='Name of contract to renew soon', multi='contract_info')
    contract_renewal_total = fields.Text(compute='_compute_contract_reminder', string='Total of contracts due or overdue minus one',
        multi='contract_info')
    car_value = fields.Float(string="Catalog Value (VAT Incl.)", help='Value of the bought vehicle')
    net_car_value = fields.Float(string="Purchase Value", help="Purchase Value of the car")
    residual_value = fields.Float()
    plan_to_change_car = fields.Boolean(related='driver_id.plan_to_change_car', store=True, readonly=False)
    @api.depends('model_id.brand_id.name', 'model_id.name', 'license_plate')
    def _compute_vehicle_name(self):
        # Display name is "<brand>/<model>/<plate>", with empty-string
        # fallbacks for brand/model and a translated placeholder for the plate.
        for record in self:
            record.name = (record.model_id.brand_id.name or '') + '/' + (record.model_id.name or '') + '/' + (record.license_plate or _('No Plate'))
    def _get_odometer(self):
        """Compute the last odometer value: the highest recorded reading."""
        FleetVehicalOdometer = self.env['fleet.vehicle.odometer']
        for record in self:
            # order='value desc', limit=1 -> the maximum recorded value.
            vehicle_odometer = FleetVehicalOdometer.search([('vehicle_id', '=', record.id)], limit=1, order='value desc')
            if vehicle_odometer:
                record.odometer = vehicle_odometer.value
            else:
                record.odometer = 0
    def _set_odometer(self):
        """Inverse of _get_odometer: writing creates a new odometer log dated today."""
        for record in self:
            if record.odometer:
                date = fields.Date.context_today(record)
                data = {'value': record.odometer, 'date': date, 'vehicle_id': record.id}
                self.env['fleet.vehicle.odometer'].create(data)
    def _compute_count_all(self):
        """Compute the stat-button counters (odometer/fuel/service/contract/cost/history)."""
        Odometer = self.env['fleet.vehicle.odometer']
        LogFuel = self.env['fleet.vehicle.log.fuel']
        LogService = self.env['fleet.vehicle.log.services']
        LogContract = self.env['fleet.vehicle.log.contract']
        Cost = self.env['fleet.vehicle.cost']
        for record in self:
            record.odometer_count = Odometer.search_count([('vehicle_id', '=', record.id)])
            record.fuel_logs_count = LogFuel.search_count([('vehicle_id', '=', record.id)])
            record.service_count = LogService.search_count([('vehicle_id', '=', record.id)])
            record.contract_count = LogContract.search_count([('vehicle_id', '=', record.id), ('state', '!=', 'closed')])
            # Only root costs (no parent) are counted.
            record.cost_count = Cost.search_count([('vehicle_id', '=', record.id), ('parent_id', '=', False)])
            record.history_count = self.env['fleet.vehicle.assignation.log'].search_count([('vehicle_id', '=', record.id)])
    @api.depends('log_contracts')
    def _compute_contract_reminder(self):
        """Flag contracts that are overdue or expire within the configured delay,
        and expose the name of the soonest-expiring one."""
        params = self.env['ir.config_parameter'].sudo()
        delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
        for record in self:
            overdue = False
            due_soon = False
            total = 0
            name = ''
            for element in record.log_contracts:
                if element.state in ('open', 'diesoon', 'expired') and element.expiration_date:
                    current_date_str = fields.Date.context_today(record)
                    due_time_str = element.expiration_date
                    current_date = fields.Date.from_string(current_date_str)
                    due_time = fields.Date.from_string(due_time_str)
                    diff_time = (due_time - current_date).days
                    if diff_time < 0:
                        overdue = True
                        total += 1
                    if diff_time < delay_alert_contract:
                        due_soon = True
                        total += 1
            if overdue or due_soon:
                log_contract = self.env['fleet.vehicle.log.contract'].search([
                    ('vehicle_id', '=', record.id),
                    ('state', 'in', ('open', 'diesoon', 'expired'))
                    ], limit=1, order='expiration_date asc')
                if log_contract:
                    # we display only the name of the oldest overdue/due soon contract
                    name = log_contract.cost_subtype_id.name
            record.contract_renewal_overdue = overdue
            record.contract_renewal_due_soon = due_soon
            record.contract_renewal_total = total - 1  # we remove 1 from the real total for display purposes
            record.contract_renewal_name = name
    def _search_contract_renewal_due_soon(self, operator, value):
        """Search method for contract_renewal_due_soon: vehicles having a contract
        expiring between today and today + delay_alert_contract days."""
        params = self.env['ir.config_parameter'].sudo()
        delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
        res = []
        assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
        if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
            search_operator = 'in'
        else:
            search_operator = 'not in'
        today = fields.Date.context_today(self)
        datetime_today = fields.Datetime.from_string(today)
        limit_date = fields.Datetime.to_string(datetime_today + relativedelta(days=+delay_alert_contract))
        self.env.cr.execute("""SELECT cost.vehicle_id,
                        count(contract.id) AS contract_number
                        FROM fleet_vehicle_cost cost
                        LEFT JOIN fleet_vehicle_log_contract contract ON contract.cost_id = cost.id
                        WHERE contract.expiration_date IS NOT NULL
                          AND contract.expiration_date > %s
                          AND contract.expiration_date < %s
                          AND contract.state IN ('open', 'diesoon', 'expired')
                        GROUP BY cost.vehicle_id""", (today, limit_date))
        res_ids = [x[0] for x in self.env.cr.fetchall()]
        res.append(('id', search_operator, res_ids))
        return res
    def _search_get_overdue_contract_reminder(self, operator, value):
        """Search method for contract_renewal_overdue: vehicles having a contract
        whose expiration date is already past."""
        res = []
        assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
        if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
            search_operator = 'in'
        else:
            search_operator = 'not in'
        today = fields.Date.context_today(self)
        self.env.cr.execute('''SELECT cost.vehicle_id,
                        count(contract.id) AS contract_number
                        FROM fleet_vehicle_cost cost
                        LEFT JOIN fleet_vehicle_log_contract contract ON contract.cost_id = cost.id
                        WHERE contract.expiration_date IS NOT NULL
                          AND contract.expiration_date < %s
                          AND contract.state IN ('open', 'diesoon', 'expired')
                        GROUP BY cost.vehicle_id ''', (today,))
        res_ids = [x[0] for x in self.env.cr.fetchall()]
        res.append(('id', search_operator, res_ids))
        return res
    @api.model
    def create(self, vals):
        # Fleet administrator may not have rights to create the plan_to_change_car value when the driver_id is a res.user
        # This trick is used to prevent access right error.
        ptc_value = 'plan_to_change_car' in vals.keys() and {'plan_to_change_car': vals.pop('plan_to_change_car')}
        res = super(FleetVehicle, self).create(vals)
        if ptc_value:
            # Re-apply the popped value with elevated rights.
            res.sudo().write(ptc_value)
        if 'driver_id' in vals and vals['driver_id']:
            res.create_driver_history(vals['driver_id'])
        if 'future_driver_id' in vals and vals['future_driver_id']:
            # Mark the future driver as planning a car change, unless the
            # vehicle sits in the waiting list state.
            state_waiting_list = self.env.ref('fleet.fleet_vehicle_state_waiting_list', raise_if_not_found=False)
            states = res.mapped('state_id').ids
            if not state_waiting_list or state_waiting_list.id not in states:
                future_driver = self.env['res.partner'].browse(vals['future_driver_id'])
                future_driver.sudo().write({'plan_to_change_car': True})
        return res
    def write(self, vals):
        if 'driver_id' in vals and vals['driver_id']:
            driver_id = vals['driver_id']
            # Only log a new assignation for vehicles actually changing driver.
            self.filtered(lambda v: v.driver_id.id != driver_id).create_driver_history(driver_id)
        if 'future_driver_id' in vals and vals['future_driver_id']:
            state_waiting_list = self.env.ref('fleet.fleet_vehicle_state_waiting_list', raise_if_not_found=False)
            states = self.mapped('state_id').ids if 'state_id' not in vals else [vals['state_id']]
            if not state_waiting_list or state_waiting_list.id not in states:
                future_driver = self.env['res.partner'].browse(vals['future_driver_id'])
                future_driver.sudo().write({'plan_to_change_car': True})
        res = super(FleetVehicle, self).write(vals)
        if 'active' in vals and not vals['active']:
            # Archiving a vehicle archives its contracts too.
            self.mapped('log_contracts').write({'active': False})
        return res
    def _close_driver_history(self):
        """Close (date_end = today) the open assignation logs of the current drivers."""
        self.env['fleet.vehicle.assignation.log'].search([
            ('vehicle_id', 'in', self.ids),
            ('driver_id', 'in', self.mapped('driver_id').ids),
            ('date_end', '=', False)
        ]).write({'date_end': fields.Date.today()})
    def create_driver_history(self, driver_id):
        """Open a new assignation log entry (starting today) for each vehicle."""
        for vehicle in self:
            self.env['fleet.vehicle.assignation.log'].create({
                'vehicle_id': vehicle.id,
                'driver_id': driver_id,
                'date_start': fields.Date.today(),
            })
    def action_accept_driver_change(self):
        """Promote future_driver_id to driver_id, releasing that driver's previous cars."""
        # Find all the vehicles for which the driver is the future_driver_id
        # remove their driver_id and close their history using current date
        vehicles = self.search([('driver_id', 'in', self.mapped('future_driver_id').ids)])
        vehicles.write({'driver_id': False})
        vehicles._close_driver_history()
        for vehicle in self:
            vehicle.future_driver_id.sudo().write({'plan_to_change_car': False})
            vehicle.driver_id = vehicle.future_driver_id
            vehicle.future_driver_id = False
    @api.model
    def _read_group_stage_ids(self, stages, domain, order):
        # Kanban group_expand: always show every state column, even empty ones.
        return self.env['fleet.vehicle.state'].search([], order=order)
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Allow searching vehicles by name OR by driver name."""
        args = args or []
        if operator == 'ilike' and not (name or '').strip():
            domain = []
        else:
            domain = ['|', ('name', operator, name), ('driver_id.name', operator, name)]
        rec = self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)
        return models.lazy_name_get(self.browse(rec).with_user(name_get_uid))
    def return_action_to_open(self):
        """ This opens the xml view specified in xml_id for the current vehicle """
        self.ensure_one()
        xml_id = self.env.context.get('xml_id')
        if xml_id:
            res = self.env['ir.actions.act_window'].for_xml_id('fleet', xml_id)
            res.update(
                context=dict(self.env.context, default_vehicle_id=self.id, group_by=False),
                domain=[('vehicle_id', '=', self.id)]
            )
            return res
        return False
    def act_show_log_cost(self):
        """ This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
            @return: the costs log view
        """
        self.ensure_one()
        copy_context = dict(self.env.context)
        copy_context.pop('group_by', None)
        res = self.env['ir.actions.act_window'].for_xml_id('fleet', 'fleet_vehicle_costs_action')
        res.update(
            context=dict(copy_context, default_vehicle_id=self.id, search_default_parent_false=True),
            domain=[('vehicle_id', '=', self.id)]
        )
        return res
    def _track_subtype(self, init_values):
        # Driver (or future driver) changes get their own mail subtype.
        self.ensure_one()
        if 'driver_id' in init_values or 'future_driver_id' in init_values:
            return self.env.ref('fleet.mt_fleet_driver_updated')
        return super(FleetVehicle, self)._track_subtype(init_values)
    def open_assignation_logs(self):
        """Open the list of driver assignation logs for this vehicle."""
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'name': 'Assignation Logs',
            'view_mode': 'tree',
            'res_model': 'fleet.vehicle.assignation.log',
            'domain': [('vehicle_id', '=', self.id)],
            'context': {'default_driver_id': self.driver_id.id, 'default_vehicle_id': self.id}
        }
class FleetVehicleOdometer(models.Model):
    """A dated odometer reading for a vehicle."""
    _name = 'fleet.vehicle.odometer'
    _description = 'Odometer log for a vehicle'
    _order = 'date desc'

    name = fields.Char(compute='_compute_vehicle_log_name', store=True)
    date = fields.Date(default=fields.Date.context_today)
    value = fields.Float('Odometer Value', group_operator="max")
    vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', required=True)
    unit = fields.Selection(related='vehicle_id.odometer_unit', string="Unit", readonly=True)
    driver_id = fields.Many2one(related="vehicle_id.driver_id", string="Driver", readonly=False)

    @api.depends('vehicle_id', 'date')
    def _compute_vehicle_log_name(self):
        """Display name is "<vehicle> / <date>", or whichever part exists."""
        for log in self:
            vehicle_name = log.vehicle_id.name
            if vehicle_name and log.date:
                log.name = vehicle_name + ' / ' + str(log.date)
            elif vehicle_name:
                log.name = vehicle_name
            else:
                log.name = str(log.date)

    @api.onchange('vehicle_id')
    def _onchange_vehicle(self):
        """Mirror the selected vehicle's odometer unit onto the log."""
        if not self.vehicle_id:
            return
        self.unit = self.vehicle_id.odometer_unit
class FleetVehicleState(models.Model):
    """A workflow state of a vehicle (e.g. registered, waiting list); name must be unique."""
    _name = 'fleet.vehicle.state'
    _order = 'sequence asc'
    _description = 'Vehicle Status'
    name = fields.Char(required=True, translate=True)
    sequence = fields.Integer(help="Used to order the note stages")
    _sql_constraints = [('fleet_state_name_unique', 'unique(name)', 'State name already exists')]
class FleetVehicleTag(models.Model):
    """A colored tag attachable to vehicles; tag names are unique."""
    _name = 'fleet.vehicle.tag'
    _description = 'Vehicle Tag'
    name = fields.Char('Tag Name', required=True, translate=True)
    color = fields.Integer('Color Index')
    _sql_constraints = [('name_uniq', 'unique (name)', "Tag name already exists !")]
class FleetServiceType(models.Model):
    """A type of fleet service, categorized as contract-related or vehicle service."""
    _name = 'fleet.service.type'
    _description = 'Fleet Service Type'
    name = fields.Char(required=True, translate=True)
    category = fields.Selection([
        ('contract', 'Contract'),
        ('service', 'Service')
        ], 'Category', required=True, help='Choose whether the service refer to contracts, vehicle services or both')
class FleetVehicleAssignationLog(models.Model):
    """History entry recording which driver held a vehicle over a date range."""
    _name = "fleet.vehicle.assignation.log"
    _description = "Drivers history on a vehicle"
    _order = "create_date desc, date_start desc"
    vehicle_id = fields.Many2one('fleet.vehicle', string="Vehicle", required=True)
    driver_id = fields.Many2one('res.partner', string="Driver", required=True)
    # An open assignation has date_end unset; it is closed by FleetVehicle._close_driver_history().
    date_start = fields.Date(string="Start Date")
    date_end = fields.Date(string="End Date")
| agpl-3.0 |
Mixser/django | tests/swappable_models/tests.py | 339 | 2130 | from __future__ import unicode_literals
from swappable_models.models import Article
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class SwappableModelTests(TestCase):
    """Tests for models made swappable via the TEST_ARTICLE_MODEL setting."""
    available_apps = [
        'swappable_models',
        'django.contrib.auth',
        'django.contrib.contenttypes',
    ]
    @override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
    def test_generated_data(self):
        "Permissions and content types are not created for a swapped model"
        # Delete all permissions and content_types
        Permission.objects.filter(content_type__app_label='swappable_models').delete()
        ContentType.objects.filter(app_label='swappable_models').delete()
        # Re-run migrate. This will re-build the permissions and content types.
        new_io = StringIO()
        management.call_command('migrate', interactive=False, stdout=new_io)
        # Check that content types and permissions exist for the swapped model,
        # but not for the swappable model.
        apps_models = [(p.content_type.app_label, p.content_type.model)
                       for p in Permission.objects.all()]
        self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
        self.assertNotIn(('swappable_models', 'article'), apps_models)
        apps_models = [(ct.app_label, ct.model)
                       for ct in ContentType.objects.all()]
        self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
        self.assertNotIn(('swappable_models', 'article'), apps_models)
    @override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
    def test_case_insensitive(self):
        "Model names are case insensitive. Check that model swapping honors this."
        try:
            Article.objects.all()
        except AttributeError:
            self.fail('Swappable model names should be case insensitive.')
        self.assertIsNone(Article._meta.swapped)
| bsd-3-clause |
netfirms/erpnext | erpnext/patches/v4_0/update_user_properties.py | 119 | 1879 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.permissions
import frappe.defaults
def execute():
	"""Patch entry point: grant print/email DocPerms and convert legacy
	user properties into User Permission records."""
	# Reload the DocType metadata this patch relies on before touching data.
	frappe.reload_doc("core", "doctype", "docfield")
	frappe.reload_doc("hr", "doctype", "employee")
	set_print_email_permissions()
	migrate_user_properties_to_user_permissions()
	frappe.clear_cache()
def migrate_user_properties_to_user_permissions():
	"""Re-key per-user default values: the old key was a Link fieldname,
	the new key is the DocType that field links to, with parenttype set
	to 'User Permission'.  Global defaults are left untouched."""
	for d in frappe.db.sql("""select parent, defkey, defvalue from tabDefaultValue
		where parent not in ('__global', '__default')""", as_dict=True):
		# Find the DocType the old fieldname-style key links to (if any).
		df = frappe.db.sql("""select options from tabDocField
			where fieldname=%s and fieldtype='Link'""", d.defkey, as_dict=True)
		if df:
			frappe.db.sql("""update tabDefaultValue
				set defkey=%s, parenttype='User Permission'
				where defkey=%s and
				parent not in ('__global', '__default')""", (df[0].options, d.defkey))
def set_print_email_permissions():
	"""Move the legacy DocType-level allow_print/allow_email flags into the
	per-role `print` and `email` columns of DocPerm."""
	# reset Page perms
	from frappe.core.page.permission_manager.permission_manager import reset
	reset("Page")
	reset("Report")
	if "allow_print" not in frappe.db.get_table_columns("DocType"):
		# Legacy columns already dropped: nothing to migrate.
		return
	# patch to move print, email into DocPerm
	# NOTE: allow_print and allow_email are misnamed. They were used to hide print / hide email
	for doctype, hide_print, hide_email in frappe.db.sql("""select name, ifnull(allow_print, 0), ifnull(allow_email, 0)
		from `tabDocType` where ifnull(issingle, 0)=0 and ifnull(istable, 0)=0 and
		(ifnull(allow_print, 0)=0 or ifnull(allow_email, 0)=0)"""):
		if not hide_print:
			frappe.db.sql("""update `tabDocPerm` set `print`=1
				where permlevel=0 and `read`=1 and parent=%s""", doctype)
		if not hide_email:
			frappe.db.sql("""update `tabDocPerm` set `email`=1
				where permlevel=0 and `read`=1 and parent=%s""", doctype)
| agpl-3.0 |
toddeye/home-assistant | homeassistant/components/switch/mfi.py | 2 | 2779 | """
homeassistant.components.switch.mfi
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Ubiquiti mFi switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.mfi/
"""
import logging
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
REQUIREMENTS = ['mficlient==0.2.2']
_LOGGER = logging.getLogger(__name__)
SWITCH_MODELS = [
'Outlet',
'Output 5v',
'Output 12v',
'Output 24v',
]
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up mFi switches.

    Requires 'host', CONF_USERNAME and CONF_PASSWORD in the platform
    config; 'port' defaults to 6443. Returns False on bad config or if
    the controller rejects the login.
    """
    if not validate_config({DOMAIN: config},
                           {DOMAIN: ['host',
                                     CONF_USERNAME,
                                     CONF_PASSWORD]},
                           _LOGGER):
        _LOGGER.error('A host, username, and password are required')
        return False

    host = config.get('host')
    port = int(config.get('port', 6443))
    username = config.get('username')
    password = config.get('password')

    # BUG FIX: the previous code caught `client.FailedToLogin`, but `client`
    # is still unbound when the MFiClient constructor raises, so a login
    # failure crashed with NameError instead of being reported. Import the
    # exception class from the module and catch that instead.
    from mficlient.client import FailedToLogin, MFiClient

    try:
        client = MFiClient(host, username, password, port=port)
    except FailedToLogin as ex:
        _LOGGER.error('Unable to connect to mFi: %s', str(ex))
        return False

    # One switch entity per switch-capable port on every mFi device.
    add_devices(MfiSwitch(port)
                for device in client.get_devices()
                for port in device.ports.values()
                if port.model in SWITCH_MODELS)
class MfiSwitch(SwitchDevice):
    """ An mFi switch-able device. """

    def __init__(self, port):
        self._port = port
        # Commanded state applied optimistically until the next poll confirms it.
        self._target_state = None

    @property
    def should_poll(self):
        """Polling is required; the controller does not push state changes."""
        return True

    @property
    def unique_id(self):
        """Stable identifier taken from the mFi port."""
        return self._port.ident

    @property
    def name(self):
        """Human-readable port label."""
        return self._port.label

    @property
    def is_on(self):
        """True while the port output is energized."""
        return self._port.output

    def update(self):
        """Refresh port data, locally overriding with any pending command."""
        self._port.refresh()
        if self._target_state is None:
            return
        self._port.data['output'] = float(self._target_state)
        self._target_state = None

    def turn_on(self):
        """Energize the port and remember the commanded state."""
        self._port.control(True)
        self._target_state = True

    def turn_off(self):
        """De-energize the port and remember the commanded state."""
        self._port.control(False)
        self._target_state = False

    @property
    def current_power_mwh(self):
        """Current power draw in milliwatts."""
        milliwatts = self._port.data.get('active_pwr', 0) * 1000
        return int(milliwatts)

    @property
    def device_state_attributes(self):
        """Measured voltage and current, rounded to one decimal place."""
        data = self._port.data
        return {
            'volts': round(data.get('v_rms', 0), 1),
            'amps': round(data.get('i_rms', 0), 1),
        }
| mit |
RJVB/audacity | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/kde4.py | 275 | 2007 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,re
from waflib import Options,TaskGen,Task,Utils
from waflib.TaskGen import feature,after_method
@feature('msgfmt')
def apply_msgfmt(self):
	"""Create one msgfmt task per language in ``self.langs`` and schedule the
	resulting .mo catalog for installation under the KDE locale directory."""
	for lang in self.to_list(self.langs):
		node=self.path.find_resource(lang+'.po')
		task=self.create_task('msgfmt',node,node.change_ext('.mo'))
		# Only the last path component names the language (e.g. "po/fr" -> "fr").
		langname=lang.split('/')
		langname=langname[-1]
		inst=getattr(self,'install_path','${KDE4_LOCALE_INSTALL_DIR}')
		self.bld.install_as(inst+os.sep+langname+os.sep+'LC_MESSAGES'+os.sep+getattr(self,'appname','set_your_appname')+'.mo',task.outputs[0],chmod=getattr(self,'chmod',Utils.O644))
class msgfmt(Task.Task):
	# Compile a gettext .po catalog into a binary .mo file.
	color='BLUE'
	run_str='${MSGFMT} ${SRC} -o ${TGT}'
def configure(self):
	"""Locate the KDE4 installation via kde4-config, parse the cmake
	KDELibsDependencies module for install paths, and populate the uselib
	variables (LIB_*/LIBPATH_*/INCLUDES_*) for the KDE libraries."""
	kdeconfig=self.find_program('kde4-config')
	prefix=self.cmd_and_log('%s --prefix'%kdeconfig).strip()
	fname='%s/share/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
	try:os.stat(fname)
	except OSError:
		# Some distributions install the module under share/kde4 instead.
		fname='%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
		try:os.stat(fname)
		except OSError:self.fatal('could not open %s'%fname)
	try:
		txt=Utils.readf(fname)
	except(OSError,IOError):
		self.fatal('could not read %s'%fname)
	# Undo backslash line continuations and strip cmake '#' comments.
	txt=txt.replace('\\\n','\n')
	fu=re.compile('#(.*)\n')
	txt=fu.sub('',txt)
	# Import every `set(KEY "value")` assignment into the build environment.
	setregexp=re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
	found=setregexp.findall(txt)
	for(_,key,val)in found:
		self.env[key]=val
	self.env['LIB_KDECORE']=['kdecore']
	self.env['LIB_KDEUI']=['kdeui']
	self.env['LIB_KIO']=['kio']
	self.env['LIB_KHTML']=['khtml']
	self.env['LIB_KPARTS']=['kparts']
	self.env['LIBPATH_KDECORE']=[os.path.join(self.env.KDE4_LIB_INSTALL_DIR,'kde4','devel'),self.env.KDE4_LIB_INSTALL_DIR]
	self.env['INCLUDES_KDECORE']=[self.env['KDE4_INCLUDE_INSTALL_DIR']]
	self.env.append_value('INCLUDES_KDECORE',[self.env['KDE4_INCLUDE_INSTALL_DIR']+os.sep+'KDE'])
	# msgfmt is needed by the 'msgfmt' feature above.
	self.find_program('msgfmt',var='MSGFMT')
| gpl-2.0 |
avedaee/DIRAC | TransformationSystem/Client/TransformationClient.py | 1 | 22189 | """ Class that contains client access to the transformation DB handler. """
__RCSID__ = "$Id$"
import types
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.Client import Client
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Resources.Catalog.FileCatalogueBase import FileCatalogueBase
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# Module-level placeholders kept for backward compatibility; the per-call
# 'rpc' and 'url' keyword arguments of the client methods take precedence.
rpc = None
url = None
class TransformationClient( Client, FileCatalogueBase ):
""" Exposes the functionality available in the DIRAC/TransformationHandler
This inherits the DIRAC base Client for direct execution of server functionality.
The following methods are available (although not visible here).
Transformation (table) manipulation
deleteTransformation(transName)
getTransformationParameters(transName,paramNames)
getTransformationWithStatus(status)
setTransformationParameter(transName,paramName,paramValue)
deleteTransformationParameter(transName,paramName)
TransformationFiles table manipulation
addFilesToTransformation(transName,lfns)
addTaskForTransformation(transName,lfns=[],se='Unknown')
getTransformationStats(transName)
TransformationTasks table manipulation
setTaskStatus(transName, taskID, status)
setTaskStatusAndWmsID(transName, taskID, status, taskWmsID)
getTransformationTaskStats(transName)
deleteTasks(transName, taskMin, taskMax)
extendTransformation( transName, nTasks)
getTasksToSubmit(transName,numTasks,site='')
TransformationLogging table manipulation
getTransformationLogging(transName)
File/directory manipulation methods (the remainder of the interface can be found below)
getFileSummary(lfns)
exists(lfns)
Web monitoring tools
getDistinctAttributeValues(attribute, selectDict)
getTransformationStatusCounters()
getTransformationSummary()
getTransformationSummaryWeb(selectDict, sortList, startItem, maxItems)
"""
  def __init__( self, **kwargs ):
    """Initialize the client: read the MaxResetCounter operations option and
    point the client at the Transformation/TransformationManager service."""
    Client.__init__( self, **kwargs )
    opsH = Operations()
    self.maxResetCounter = opsH.getValue( 'Productions/ProductionFilesMaxResetCounter', 10 )
    self.setServer( 'Transformation/TransformationManager' )
  def setServer( self, url ):
    # Override the service URL used by all subsequent RPC calls.
    self.serverURL = url
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None,
rpc = '', url = '' ):
rpcClient = self._getRPC( rpc = rpc, url = url )
return rpcClient. getCounters( table, attrList, condDict, older, newer, timeStamp )
  def addTransformation( self, transName, description, longDescription, transType, plugin, agentType, fileMask,
                         transformationGroup = 'General',
                         groupSize = 1,
                         inheritedFrom = 0,
                         body = '',
                         maxTasks = 0,
                         eventsPerTask = 0,
                         addFiles = True,
                         rpc = '', url = '', timeout = 1800 ):
    """ add a new transformation

    All arguments are forwarded verbatim to the TransformationManager
    service; the long default timeout covers the potentially slow
    server-side file registration when addFiles is True.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.addTransformation( transName, description, longDescription, transType, plugin,
                                        agentType, fileMask, transformationGroup, groupSize, inheritedFrom,
                                        body, maxTasks, eventsPerTask, addFiles )
def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationDate',
orderAttribute = None, limit = 100, extraParams = False, rpc = '', url = '', timeout = None ):
""" gets all the transformations in the system, incrementally. "limit" here is just used to determine the offset.
"""
rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
transformations = []
# getting transformations - incrementally
offsetToApply = 0
while True:
res = rpcClient.getTransformations( condDict, older, newer, timeStamp, orderAttribute, limit,
extraParams, offsetToApply )
if not res['OK']:
return res
else:
gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
if res['Value']:
transformations = transformations + res['Value']
offsetToApply += limit
if len( res['Value'] ) < limit:
break
return S_OK( transformations )
  def getTransformation( self, transName, extraParams = False, rpc = '', url = '', timeout = None ):
    """Return the parameters of a single transformation from the service."""
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    return rpcClient.getTransformation( transName, extraParams )
  def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
                              orderAttribute = None, limit = 10000, rpc = '', url = '', timeout = 1800 ):
    """ gets all the transformation files for a transformation, incrementally.
    "limit" here is just used to determine the offset.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    transformationFiles = []
    # getting transformationFiles - incrementally
    offsetToApply = 0
    while True:
      res = rpcClient.getTransformationFiles( condDict, older, newer, timeStamp, orderAttribute, limit, offsetToApply )
      if not res['OK']:
        return res
      else:
        gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
        if res['Value']:
          transformationFiles = transformationFiles + res['Value']
          offsetToApply += limit
        # A page shorter than 'limit' means the server has no more records.
        if len( res['Value'] ) < limit:
          break
    return S_OK( transformationFiles )
  def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
                              orderAttribute = None, limit = 10000, inputVector = False, rpc = '',
                              url = '', timeout = None ):
    """ gets all the transformation tasks for a transformation, incrementally.
    "limit" here is just used to determine the offset.
    """
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    transformationTasks = []
    # getting transformationFiles - incrementally
    offsetToApply = 0
    while True:
      res = rpcClient.getTransformationTasks( condDict, older, newer, timeStamp, orderAttribute, limit,
                                              inputVector, offsetToApply )
      if not res['OK']:
        return res
      else:
        gLogger.verbose( "Result for limit %d, offset %d: %d" % ( limit, offsetToApply, len( res['Value'] ) ) )
        if res['Value']:
          transformationTasks = transformationTasks + res['Value']
          offsetToApply += limit
        # A page shorter than 'limit' means the server has no more records.
        if len( res['Value'] ) < limit:
          break
    return S_OK( transformationTasks )
  def cleanTransformation( self, transID, rpc = '', url = '', timeout = None ):
    """ Clean the transformation, and set the status parameter (doing it here, for easier extensibility)
    """
    # Cleaning
    rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
    res = rpcClient.cleanTransformation( transID )
    if not res['OK']:
      return res
    # Setting the status
    return self.setTransformationParameter( transID, 'Status', 'TransformationCleaned' )
def moveFilesToDerivedTransformation( self, transDict, resetUnused = True ):
  """ move files input to a transformation, to the derived one

      Returns S_OK( ( parentProd, movedFiles ) ) where movedFiles counts the
      moved files per "move status".
  """
  prod = transDict['TransformationID']
  parentProd = int( transDict.get( 'InheritedFrom', 0 ) )
  movedFiles = {}
  # Nothing to do for a transformation that was not derived from another one.
  if not parentProd:
    gLogger.warn( "[None] [%d] .moveFilesToDerivedTransformation: Transformation was not derived..." % prod )
    return S_OK( ( parentProd, movedFiles ) )
  # get the lfns in status Unused/MaxReset of the parent production
  res = self.getTransformationFiles( condDict = {'TransformationID': parentProd, 'Status': [ 'Unused', 'MaxReset' ]} )
  if not res['OK']:
    gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting Unused files from transformation %s:" % ( prod, parentProd ), res['Message'] )
    return res
  parentFiles = res['Value']
  lfns = [lfnDict['LFN'] for lfnDict in parentFiles]
  if not lfns:
    gLogger.info( "[None] [%d] .moveFilesToDerivedTransformation: No files found to be moved from transformation %d" % ( prod, parentProd ) )
    return S_OK( ( parentProd, movedFiles ) )
  # get the lfns of the derived production that were Unused/MaxReset in the parent one
  res = self.getTransformationFiles( condDict = { 'TransformationID': prod, 'LFN': lfns} )
  if not res['OK']:
    gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error getting files from derived transformation" % prod, res['Message'] )
    return res
  derivedFiles = res['Value']
  # Files inherited from the parent carry a '-<parentProd>' suffix in their status.
  suffix = '-%d' % parentProd
  derivedStatusDict = dict( [( derivedDict['LFN'], derivedDict['Status'] ) for derivedDict in derivedFiles] )
  newStatusFiles = {}
  parentStatusFiles = {}
  force = False
  for parentDict in parentFiles:
    lfn = parentDict['LFN']
    derivedStatus = derivedStatusDict.get( lfn )
    if derivedStatus:
      parentStatus = parentDict['Status']
      # Optionally recycle MaxReset files as Unused in the derived
      # transformation; that transition needs the state machine forced.
      if resetUnused and parentStatus == 'MaxReset':
        status = 'Unused'
        moveStatus = 'Unused from MaxReset'
        force = True
      else:
        status = parentStatus
        moveStatus = parentStatus
      if derivedStatus.endswith( suffix ):
        # This file is Unused or MaxReset while it was most likely Assigned at the time of derivation
        parentStatusFiles.setdefault( 'Moved-%s' % str( prod ), [] ).append( lfn )
        newStatusFiles.setdefault( ( status, parentStatus ), [] ).append( lfn )
        movedFiles[moveStatus] = movedFiles.setdefault( moveStatus, 0 ) + 1
      elif parentDict['Status'] == 'Unused':
        # If the file was Unused already at derivation time, set it NotProcessed
        parentStatusFiles.setdefault( 'NotProcessed', [] ).append( lfn )
  # Set the status in the parent transformation first
  for status, lfnList in parentStatusFiles.items():
    for lfnChunk in breakListIntoChunks( lfnList, 5000 ):
      res = self.setFileStatusForTransformation( parentProd, status, lfnChunk )
      if not res['OK']:
        gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d "
                       % ( prod, status, len( lfnList ), parentProd ),
                       res['Message'] )
  # Set the status in the new transformation
  for ( status, oldStatus ), lfnList in newStatusFiles.items():
    for lfnChunk in breakListIntoChunks( lfnList, 5000 ):
      res = self.setFileStatusForTransformation( prod, status, lfnChunk, force = force )
      if not res['OK']:
        # On failure, roll the chunk back to its previous status in the parent.
        gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files; resetting them %s in transformation %d"
                       % ( prod, status, len( lfnChunk ), oldStatus, parentProd ),
                       res['Message'] )
        res = self.setFileStatusForTransformation( parentProd, oldStatus, lfnChunk )
        if not res['OK']:
          gLogger.error( "[None] [%d] .moveFilesToDerivedTransformation: Error setting status %s for %d files in transformation %d"
                         % ( prod, oldStatus, len( lfnChunk ), parentProd ),
                         res['Message'] )
  return S_OK( ( parentProd, movedFiles ) )
def setFileStatusForTransformation( self, transName, newLFNsStatus = {}, lfns = [], force = False,
                                    rpc = '', url = '', timeout = 120 ):
  """ sets the file status for LFNs of a transformation

      For backward compatibility purposes, the status and LFNs can be passed in 2 ways:

        - newLFNsStatus is a dictionary with the form:
          {'/this/is/an/lfn1.txt': 'StatusA', '/this/is/an/lfn2.txt': 'StatusB', ... }
          and at this point lfns is not considered
        - newLFNStatus is a string, that applies to all the LFNs in lfns
  """
  rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
  # create dictionary in case newLFNsStatus is a string
  if type( newLFNsStatus ) == type( '' ):
    newLFNsStatus = dict( [( lfn, newLFNsStatus ) for lfn in lfns ] )
  # gets status as of today
  tsFiles = self.getTransformationFiles( {'TransformationID':transName, 'LFN': newLFNsStatus.keys()} )
  if not tsFiles['OK']:
    return tsFiles
  tsFiles = tsFiles['Value']
  if tsFiles:
    # for convenience, makes a small dictionary out of the tsFiles, with the lfn as key
    tsFilesAsDict = {}
    for tsFile in tsFiles:
      tsFilesAsDict[tsFile['LFN']] = [tsFile['Status'], tsFile['ErrorCount'], tsFile['FileID']]
    # applying the state machine to the proposed status
    newStatuses = self._applyTransformationFilesStateMachine( tsFilesAsDict, newLFNsStatus, force )
    if newStatuses: # if there's something to update
      # must do it for the file IDs...
      newStatusForFileIDs = dict( [( tsFilesAsDict[lfn][2], newStatuses[lfn] ) for lfn in newStatuses.keys()] )
      res = rpcClient.setFileStatusForTransformation( transName, newStatusForFileIDs )
      if not res['OK']:
        return res
  # NOTE(review): if tsFiles is empty, newStatuses is never assigned and this
  # line raises NameError -- confirm whether callers guarantee a non-empty match.
  return S_OK( newStatuses )
def _applyTransformationFilesStateMachine( self, tsFilesAsDict, dictOfProposedLFNsStatus, force ):
  """ For easier extension, here we apply the state machine of the production files.
      VOs might want to replace the standard here with something they prefer.

      tsFilesAsDict: {lfn: [Status, ErrorCount, FileID]}
      dictOfProposedLFNsStatus: {lfn: proposedStatus}
      force: boolean bypassing the Processed/MaxReset protections

      Returns the dictionary of status updates to apply.
  """
  updates = {}
  for lfn, proposed in dictOfProposedLFNsStatus.items():
    # Ignore LFNs that are not part of the transformation.
    if lfn not in tsFilesAsDict:
      continue
    current = tsFilesAsDict[lfn][0]
    errorCount = tsFilesAsDict[lfn][1]
    newStatus = proposed
    if not force:
      # Protect terminal states, and cap retries of 'Unused'.
      if current.lower() == 'processed' and proposed.lower() != 'processed':
        newStatus = 'Processed'
      elif current.lower() == 'maxreset':
        newStatus = 'MaxReset'
      elif proposed.lower() == 'unused':
        # every maxResetCounter retries (10 by default) escalate to MaxReset
        if errorCount and ( ( errorCount % self.maxResetCounter ) == 0 ):
          newStatus = 'MaxReset'
    if current.lower() != newStatus:
      updates[lfn] = newStatus
  return updates
def setTransformationParameter( self, transID, paramName, paramValue, force = False,
                                rpc = '', url = '', timeout = 120 ):
  """ Sets a transformation parameter.

      'Status' is special: the proposed value is first run through the
      transformation-status state machine.
  """
  rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
  if paramName.lower() != 'status':
    return rpcClient.setTransformationParameter( transID, paramName, paramValue )
  # Status update: gather the current type and status for the state machine.
  transformation = self.getTransformation( transID )
  if not transformation['OK']:
    return transformation
  transformationType = transformation['Value']['Type']
  originalStatus = self.getTransformationParameters( transID, 'Status' )
  if not originalStatus['OK']:
    return originalStatus
  transIDAsDict = {transID: [originalStatus['Value'], transformationType]}
  proposed = {transID: paramValue}
  value = self._applyTransformationStatusStateMachine( transIDAsDict, proposed, force )
  return rpcClient.setTransformationParameter( transID, paramName, value )
def _applyTransformationStatusStateMachine( self, transIDAsDict, dictOfProposedstatus, force ):
  """ Default transformation-status state machine: every transition is
      allowed, so the (single) proposed status is returned unchanged.
      VOs may override this with a stricter policy.

      transIDAsDict: {transID: [Status, Type]} (unused by this default)
      dictOfProposedstatus: {transID: proposedStatus}
      force: boolean (unused by this default)
  """
  # Callers pass a single-entry dictionary; return its proposed status.
  return list( dictOfProposedstatus.values() )[0]
#####################################################################
#
# These are the file catalog interface methods
#
def isOK( self ):
  """ Returns the client's ``valid`` flag (set at initialisation). """
  return self.valid
def getName( self, DN = '' ):
  """ Get the file catalog type name

      ``DN`` is accepted for catalog-interface compatibility and is ignored here.
  """
  return self.name
def addDirectory( self, path, force = False, rpc = '', url = '', timeout = None ):
  """ Register a directory in the catalog. """
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).addDirectory( path, force )
def getReplicas( self, lfn, rpc = '', url = '', timeout = None ):
  """ Return replica information for the given LFN(s). """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).getReplicas( checked['Value'].keys() )
def addFile( self, lfn, force = False, rpc = '', url = '', timeout = None ):
  """ Register file(s) in the catalog. """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).addFile( checked['Value'], force )
def addReplica( self, lfn, force = False, rpc = '', url = '', timeout = None ):
  """ Register replica(s) in the catalog. """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).addReplica( checked['Value'], force )
def removeFile( self, lfn, rpc = '', url = '', timeout = None ):
  """ Remove the given LFN(s) from the catalog, in chunks of 100.

      Aggregates the per-chunk Successful/Failed dictionaries.
  """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
  successful = {}
  failed = {}
  for chunk in breakListIntoChunks( checked['Value'].keys(), 100 ):
    res = rpcClient.removeFile( chunk )
    if not res['OK']:
      return res
    successful.update( res['Value']['Successful'] )
    failed.update( res['Value']['Failed'] )
  return S_OK( {'Successful': successful, 'Failed': failed} )
def removeReplica( self, lfn, rpc = '', url = '', timeout = None ):
  """ Remove the given replica(s) from the catalog, in chunks of 100.

      Aggregates the per-chunk Successful/Failed dictionaries.
  """
  res = self.__checkArgumentFormat( lfn )
  if not res['OK']:
    return res
  lfndicts = res['Value']
  rpcClient = self._getRPC( rpc = rpc, url = url, timeout = timeout )
  successful = {}
  failed = {}
  # lfndicts is a dict, so breakListIntoChunks cannot be used; chunk by hand.
  listOfDicts = []
  localdicts = {}
  for lfnName, info in lfndicts.items():
    localdicts[lfnName] = info
    if len( localdicts ) % 100 == 0:
      listOfDicts.append( localdicts )
      localdicts = {}
  # BUGFIX: the final partial chunk was previously discarded, silently
  # skipping up to 99 replicas whenever len(lfndicts) was not a multiple of 100.
  if localdicts:
    listOfDicts.append( localdicts )
  for fDict in listOfDicts:
    res = rpcClient.removeReplica( fDict )
    if not res['OK']:
      return res
    successful.update( res['Value']['Successful'] )
    failed.update( res['Value']['Failed'] )
  return S_OK( {'Successful': successful, 'Failed': failed} )
def getReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
  """ Return the status of the given replica(s). """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).getReplicaStatus( checked['Value'] )
def setReplicaStatus( self, lfn, rpc = '', url = '', timeout = None ):
  """ Set the status of the given replica(s). """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).setReplicaStatus( checked['Value'] )
def setReplicaHost( self, lfn, rpc = '', url = '', timeout = None ):
  """ Set the host of the given replica(s). """
  checked = self.__checkArgumentFormat( lfn )
  if not checked['OK']:
    return checked
  return self._getRPC( rpc = rpc, url = url, timeout = timeout ).setReplicaHost( checked['Value'] )
def removeDirectory( self, lfn, rpc = '', url = '', timeout = None ):
  """ No-op in this catalog: reports success for every supplied LFN. """
  return self.__returnOK( lfn )
def createDirectory( self, lfn, rpc = '', url = '', timeout = None ):
  """ No-op in this catalog: reports success for every supplied LFN. """
  return self.__returnOK( lfn )
def createLink( self, lfn, rpc = '', url = '', timeout = None ):
  """ No-op in this catalog: reports success for every supplied LFN. """
  return self.__returnOK( lfn )
def removeLink( self, lfn, rpc = '', url = '', timeout = None ):
  """ No-op in this catalog: reports success for every supplied LFN. """
  return self.__returnOK( lfn )
def __returnOK( self, lfn ):
  """ Helper for the no-op catalog methods: mark every LFN as Successful. """
  res = self.__checkArgumentFormat( lfn )
  if not res['OK']:
    return res
  successful = dict.fromkeys( res['Value'], True )
  return S_OK( {'Successful': successful, 'Failed': {}} )
def __checkArgumentFormat( self, path ):
  """ Normalise a string / list / dict 'path' argument into a {url: False} dict. """
  if type( path ) in types.StringTypes:
    return S_OK( {path: False} )
  if type( path ) == types.ListType:
    return S_OK( dict.fromkeys( path, False ) )
  if type( path ) == types.DictType:
    return S_OK( path )
  return S_ERROR( "TransformationClient.__checkArgumentFormat: Supplied path is not of the correct format." )
| gpl-3.0 |
varunarya10/nova_test_latest | nova/db/sqlalchemy/migrate_repo/versions/275_add_keypair_type.py | 79 | 1459 | # Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Column, Table
from sqlalchemy import Enum
from nova.objects import keypair
def upgrade(migrate_engine):
    """Function adds key_pairs type field.

    Migration 275: adds an enum 'type' column (ssh/x509, default ssh) to
    both the key_pairs table and its shadow table.
    """
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table('key_pairs', meta, autoload=True)
    shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
    # The enum type must exist in the database before a column can use it.
    enum = Enum('ssh', 'x509', metadata=meta, name='keypair_types')
    enum.create()
    keypair_type = Column('type', enum, nullable=False,
                          server_default=keypair.KEYPAIR_TYPE_SSH)
    # Drop any pre-existing 'type' column before re-adding it.
    # NOTE(review): intent inferred from the hasattr guards (likely to make
    # the migration re-runnable) -- confirm.
    if hasattr(key_pairs.c, 'type'):
        key_pairs.c.type.drop()
    if hasattr(shadow_key_pairs.c, 'type'):
        shadow_key_pairs.c.type.drop()
    key_pairs.create_column(keypair_type)
    # Each Column object can belong to one table only, hence the copy().
    shadow_key_pairs.create_column(keypair_type.copy())
| apache-2.0 |
deveninfotech/deven-frappe | frappe/tests/test_email.py | 31 | 2714 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os, sys
import unittest, frappe
from frappe.test_runner import make_test_records
make_test_records("User")
class TestEmail(unittest.TestCase):
    """Exercises single/bulk mail sending, the unsubscribe flow and the
    bulk-recipient limit of frappe's email_lib."""
    def setUp(self):
        # Start each test from a clean slate: no unsubscriptions and an
        # empty outgoing bulk-mail queue.
        frappe.db.sql("""update tabUser set unsubscribed=0""")
        frappe.db.sql("""delete from `tabBulk Email`""")
    def test_send(self):
        from frappe.utils.email_lib import sendmail
        # NOTE(review): the actual send is commented out, so this test only
        # verifies that the import succeeds.
        #sendmail('test@example.com', subject='Test Mail', msg="Test Content")
    def test_bulk(self):
        from frappe.utils.email_lib.bulk import send
        send(recipients = ['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')
        # Both recipients must be queued (status 'Not Sent') with an
        # Unsubscribe link embedded in the message.
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""", as_dict=1)
        self.assertEquals(len(bulk), 2)
        self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('Unsubscribe' in bulk[0]['message'])
    def test_flush(self):
        # Queue two mails, flush the queue, and check they are marked Sent.
        self.test_bulk()
        from frappe.utils.email_lib.bulk import flush
        flush(from_test=True)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Sent'""", as_dict=1)
        self.assertEquals(len(bulk), 2)
        self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
    def test_unsubscribe(self):
        from frappe.utils.email_lib.bulk import unsubscribe, send
        # Simulate the unsubscribe request for test@example.com ...
        frappe.local.form_dict = frappe._dict({
            'email':'test@example.com',
            'type':'User',
            'email_field':'email',
            "from_test": True
        })
        unsubscribe()
        # ... then a subsequent bulk send must skip the unsubscribed address.
        send(recipients = ['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""",
            as_dict=1)
        self.assertEquals(len(bulk), 1)
        self.assertFalse('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('Unsubscribe' in bulk[0]['message'])
    def test_bulk_limit(self):
        from frappe.utils.email_lib.bulk import unsubscribe, send, BulkLimitCrossedError
        # Sending to 1000 recipients must trip the bulk-limit guard.
        self.assertRaises(BulkLimitCrossedError, send,
            recipients=['test@example.com']*1000,
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')

if __name__=='__main__':
    frappe.connect()
    unittest.main()
| mit |
ppaez/flyinghigh-opengl-from-python | flyinghigh/component/camera.py | 2 | 1257 | from __future__ import division
from math import sin, cos
from .. import gl, glu
class CameraBase(object):
    """Shared camera behaviour: tracks the scene item the camera is attached
    to and can reset the modelview matrix."""
    def __init__(self):
        # The world item this camera follows; assigned externally.
        self.item = None
    def reset(self):
        # Restore the modelview matrix to the identity.
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
class Camera(CameraBase):
    """3D camera positioned at its attached item's location."""
    def __init__(self):
        super(Camera, self).__init__()
    def look_at(self, lookat):
        '''
        lookat is a tuple (x, y, z), towards which the camera should point
        '''
        # Eye at the attached item's position, +Y as the up vector.
        position = self.item.position
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        glu.gluLookAt(
            position.x, position.y, position.z,
            lookat.x, lookat.y, lookat.z,
            0, 1, 0)
class CameraOrtho(CameraBase):
    """2D camera looking down the -Z axis; `angle` rolls the view."""
    def __init__(self):
        super(CameraOrtho, self).__init__()
        # Roll angle in radians, used to tilt the up vector.
        self.angle = 0.0
    def look_at(self, lookat):
        '''
        lookat is a tuple (x, y), towards which the camera should point
        '''
        # NOTE(review): reads self.position, but the base class only defines
        # self.item (and Camera uses self.item.position) -- confirm whether
        # `position` is assigned externally or should be self.item.position.
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        glu.gluLookAt(
            self.position.x, self.position.y, +1.0,
            lookat.x, lookat.y, -1.0,
            sin(self.angle), cos(self.angle), 0.0)
| bsd-3-clause |
niboshi/chainer | tests/chainer_tests/optimizer_hooks_tests/test_gradient_noise.py | 4 | 2820 | import itertools
import unittest
import mock
import numpy as np
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
# Backend matrix consumed by the inject_backend_tests decorators below;
# each injected test runs once per entry.
_backend_params = [
    # NumPy
    {},
    {'use_ideep': 'always'},
    # CuPy
    {'use_cuda': True, 'cuda_device': 0},
    {'use_cuda': True, 'cuda_device': 1},
    # ChainerX
    {'use_chainerx': True, 'chainerx_device': 'native:0'},
    {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
    {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
# Three decorator applications: one injected backend_config per parameter
# of test_gradient_noise.
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientNoise(unittest.TestCase):
    """Checks the GradientNoise optimizer hook across backend combinations."""
    # Noise scale hyperparameter passed to GradientNoise.
    eta = 0.01

    def setUp(self):
        self.target = utils.ParametersLink.from_param_props(
            # TODO(niboshi): Use different shapes
            ((2, 3), (2, 3), (2, 3)))
        # Fixed noise sample returned by the mocked noise function below.
        self.noise_value = np.random.normal(
            loc=0, scale=np.sqrt(self.eta / np.power(1, 0.55)),
            size=(2, 3)).astype(np.float32)

    def check_gradient_noise(self, backend_configs):
        target = self.target
        assert len(backend_configs) == len(list(target.params()))
        devices = [bc.device for bc in backend_configs]
        noise_value = np.asarray(self.noise_value)
        expects = []
        # Compute expected: SGD(lr=1) update with the deterministic noise
        # added to each gradient, then move each param to its test device.
        for param, device in zip(target.params(), devices):
            expects.append(param.array - param.grad - noise_value)
            param.to_device(device)

        def test_noise(xp, shape, dtype, hook, opt):
            # Make noise value an array of current backend
            return xp.array(noise_value)
        noise = mock.Mock(side_effect=test_noise)
        opt = optimizers.SGD(lr=1)
        opt.setup(self.target)
        hook = optimizer_hooks.GradientNoise(self.eta, noise_func=noise)
        opt.add_hook(hook)
        opt.update()
        # Validate the updated parameter values and the mock's call pattern.
        for expect, param in zip(expects, target.params()):
            testing.assert_allclose(expect, param.array)
        self.assertEqual(noise.call_count, len(tuple(self.target.params())))
        calls = []
        for param in target.params():
            xp = param.device.xp
            calls.append(mock.call(xp, (2, 3), np.dtype('float32'), hook,
                                   param.update_rule))
        # Order does not matter
        assert(any([noise.mock_calls == list(permuted_calls)
                    for permuted_calls in itertools.permutations(calls)]))

    def test_gradient_noise(self, backend_config0,
                            backend_config1, backend_config2):
        self.check_gradient_noise(
            [backend_config0, backend_config1, backend_config2])

testing.run_module(__name__, __file__)
omriiluz/ansible-modules-core | cloud/rackspace/rax_identity.py | 150 | 3026 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_identity
short_description: Load Rackspace Cloud Identity
description:
- Verifies Rackspace Cloud credentials and returns identity information
version_added: "1.5"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Load Rackspace Cloud Identity
gather_facts: False
hosts: local
connection: local
tasks:
- name: Load Identity
local_action:
module: rax_identity
credentials: ~/.raxpub
region: DFW
register: rackspace_identity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_identity(module, state, identity):
    """Assemble identity facts from a pyrax identity and report them through
    the Ansible module (exit_json / fail_json)."""
    changed = False
    instance = {
        'authenticated': identity.authenticated,
        'credentials': identity._creds_file,
    }
    instance.update(rax_to_dict(identity))
    # Keep only the service names rather than the full endpoint catalogue.
    instance['services'] = instance.get('services', {}).keys()
    if state == 'present':
        if not identity.authenticated:
            module.fail_json(msg='Credentials could not be verified!')
        module.exit_json(changed=changed, identity=instance)
def main():
    """Ansible entry point: validate Rackspace credentials and return identity facts."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    # Fail early when the pyrax SDK is not importable.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    state = module.params.get('state')
    setup_rax_module(module, pyrax)
    if not pyrax.identity:
        module.fail_json(msg='Failed to instantiate client. This '
                         'typically indicates an invalid region or an '
                         'incorrectly capitalized region name.')
    cloud_identity(module, state, pyrax.identity)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| gpl-3.0 |
40223202/2015cdb_g2 | static/Brython3.1.1-20150328-091302/Lib/select.py | 730 | 9440 | """
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass

# Sentinel meaning "any circumstance" in _exception_map lookups.
ALL = None

# Currently empty: all Java-specific entries are commented out in this port.
_exception_map = {
  # (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
  #(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
  #(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
  #(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
    """Translate an underlying exception into this module's ``error``.

    Looks up (exception class, circumstance) in _exception_map; anything
    without an entry becomes a generic "Unmapped" error.
    """
    try:
        mapped_exception = _exception_map[(exc.__class__, circumstance)]
        mapped_exception.java_exception = exc
        return mapped_exception
    except KeyError:
        # BUGFIX: the fallback used exc.toString(), a java.lang.Object method
        # that does not exist on Python exception objects and would itself
        # raise AttributeError; use str() instead.
        return error(-1, 'Unmapped java exception: <%s:%s>' % (str(exc), circumstance))
# Event masks mirroring CPython's select module.
POLLIN = 1
POLLOUT = 2

# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
    """Extract the java.nio SelectableChannel behind a socket-like object.

    NOTE(review): the java.nio references come from the original Jython
    implementation; in this (Brython) port `java` is an unresolved name, so
    the isinstance check cannot succeed as written -- confirm intended platform.
    """
    try:
        # Preferred accessor on wrapped socket objects.
        channel = selectable_object.getchannel()
    except:
        try:
            # Fall back to the file-descriptor route.
            channel = selectable_object.fileno().getChannel()
        except:
            raise TypeError("Object '%s' is not watchable" % selectable_object,
                            errno.ENOTSOCK)
    if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
        raise TypeError("Object '%s' is not watchable" % selectable_object,
                        errno.ENOTSOCK)
    return channel
class poll:
    """Poll-style multiplexer built on a java.nio Selector (Jython heritage).

    NOTE(review): `jlx` in the `except BaseException` clauses is a leftover of
    the original Jython `except java.lang.Exception, jlx` form; it is never
    bound here, so reaching those handlers raises NameError -- confirm the
    intended behaviour under Brython.
    """
    def __init__(self):
        self.selector = java.nio.channels.Selector.open()
        # channel -> (original user object, SelectionKey)
        self.chanmap = {}
        # (socket_object, mask) pairs registered before the socket had a channel.
        self.unconnected_sockets = []

    def _register_channel(self, socket_object, channel, mask):
        # Translate the POLLIN/POLLOUT mask into java.nio interest ops.
        jmask = 0
        if mask & POLLIN:
            # Note that OP_READ is NOT a valid event on server socket channels.
            if channel.validOps() & OP_ACCEPT:
                jmask = OP_ACCEPT
            else:
                jmask = OP_READ
        if mask & POLLOUT:
            if channel.validOps() & OP_WRITE:
                jmask |= OP_WRITE
            if channel.validOps() & OP_CONNECT:
                jmask |= OP_CONNECT
        selectionkey = channel.register(self.selector, jmask)
        self.chanmap[channel] = (socket_object, selectionkey)

    def _check_unconnected_sockets(self):
        # Retry registration of sockets that had no channel yet; keep the
        # still-unconnected ones for the next poll.
        temp_list = []
        for socket_object, mask in self.unconnected_sockets:
            channel = _getselectable(socket_object)
            if channel is not None:
                self._register_channel(socket_object, channel, mask)
            else:
                temp_list.append( (socket_object, mask) )
        self.unconnected_sockets = temp_list

    def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
        try:
            channel = _getselectable(socket_object)
            if channel is None:
                # The socket is not yet connected, and thus has no channel
                # Add it to a pending list, and return
                self.unconnected_sockets.append( (socket_object, mask) )
                return
            self._register_channel(socket_object, channel, mask)
        except BaseException:
        #except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def unregister(self, socket_object):
        try:
            channel = _getselectable(socket_object)
            self.chanmap[channel][1].cancel()
            del self.chanmap[channel]
        except BaseException:
        #except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def _dopoll(self, timeout):
        # Timeout semantics: None/negative -> block, 0 -> non-blocking,
        # positive -> milliseconds.
        if timeout is None or timeout < 0:
            self.selector.select()
        else:
            try:
                timeout = int(timeout)
                if not timeout:
                    self.selector.selectNow()
                else:
                    # No multiplication required: both cpython and java use millisecond timeouts
                    self.selector.select(timeout)
            except ValueError as vx:
                raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
        # The returned selectedKeys cannot be used from multiple threads!
        return self.selector.selectedKeys()

    def poll(self, timeout=None):
        try:
            self._check_unconnected_sockets()
            selectedkeys = self._dopoll(timeout)
            results = []
            for k in selectedkeys.iterator():
                # Map the java ready-ops back onto POLLIN/POLLOUT bits.
                jmask = k.readyOps()
                pymask = 0
                if jmask & OP_READ: pymask |= POLLIN
                if jmask & OP_WRITE: pymask |= POLLOUT
                if jmask & OP_ACCEPT: pymask |= POLLIN
                if jmask & OP_CONNECT: pymask |= POLLOUT
                # Now return the original userobject, and the return event mask
                results.append( (self.chanmap[k.channel()][0], pymask) )
            return results
        except BaseException:
        #except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def _deregister_all(self):
        try:
            for k in self.selector.keys():
                k.cancel()
            # Keys are not actually removed from the selector until the next select operation.
            self.selector.selectNow()
        except BaseException:
        #except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def close(self):
        try:
            self._deregister_all()
            self.selector.close()
        except BaseException:
        #except java.lang.Exception, jlx:
            raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
    """Normalise a select() timeout into milliseconds.

    Returns None for a blocking wait, 0 for a non-blocking poll (anything
    below one microsecond), otherwise an integer number of milliseconds.

    Raises TypeError for non-numeric input and ``error`` for negative values.
    """
    if value is None:
        return None
    try:
        floatvalue = float(value)
    except Exception:
        raise TypeError("Select timeout value must be a number or None")
    # BUGFIX: compare the converted number, not the raw argument -- comparing
    # e.g. a numeric string with 0 raises TypeError on Python 3 instead of
    # reporting the real problem.
    if floatvalue < 0:
        raise error("Select timeout value cannot be negative", errno.EINVAL)
    if floatvalue < 0.000001:
        return 0
    return int(floatvalue * 1000)  # Convert to milliseconds
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
    """Reuses poll objects on Windows (see the Jython bug above); on other
    platforms poll objects are created and closed on demand."""

    def __init__(self):
        self.is_windows = os.name == 'nt'
        if self.is_windows:
            # BUGFIX: this file imports the Python 3 'queue' module at the
            # top, but the code still used the Python 2 name 'Queue', which
            # is a NameError at runtime; use the imported 'queue' module.
            self.poll_object_queue = queue.Queue()
            import atexit
            atexit.register(self.finalize)

    def get_poll_object(self):
        # Non-Windows: a fresh poll object every time.
        if not self.is_windows:
            return poll()
        try:
            return self.poll_object_queue.get(False)
        except queue.Empty:
            return poll()

    def release_poll_object(self, pobj):
        if self.is_windows:
            # Recycle: clear registrations and keep it for later reuse.
            pobj._deregister_all()
            self.poll_object_queue.put(pobj)
        else:
            pobj.close()

    def finalize(self):
        # Drain and close every cached poll object at interpreter exit.
        if self.is_windows:
            while True:
                try:
                    p = self.poll_object_queue.get(False)
                    p.close()
                except queue.Empty:
                    return

_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
    """select() implemented on top of a (cached) poll object.

    The out-of-band list is never registered, so the third returned list is
    always empty.
    """
    timeout = _calcselecttimeoutvalue(timeout)
    # First create a poll object to do the actual watching.
    pobj = _poll_object_cache.get_poll_object()
    try:
        registered_for_read = {}
        # Check the read list
        for fd in read_fd_list:
            pobj.register(fd, POLLIN)
            registered_for_read[fd] = 1
        # And now the write list
        for fd in write_fd_list:
            if fd in registered_for_read:
                # registering a second time overwrites the first
                pobj.register(fd, POLLIN|POLLOUT)
            else:
                pobj.register(fd, POLLOUT)
        results = pobj.poll(timeout)
        # Now start preparing the results
        read_ready_list, write_ready_list, oob_ready_list = [], [], []
        for fd, mask in results:
            if mask & POLLIN:
                read_ready_list.append(fd)
            if mask & POLLOUT:
                write_ready_list.append(fd)
        return read_ready_list, write_ready_list, oob_ready_list
    finally:
        # Always return the poll object to the cache, even on error.
        _poll_object_cache.release_poll_object(pobj)

# Public alias: this module's select() is the native implementation.
select = native_select
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
    """select() variant that temporarily switches blocking channels to
    non-blocking mode, since CPython's select does not require it."""
    # First turn all sockets to non-blocking
    # keeping track of which ones have changed
    modified_channels = []
    try:
        for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
            for s in socket_list:
                channel = _getselectable(s)
                if channel.isBlocking():
                    modified_channels.append(channel)
                    channel.configureBlocking(0)
        return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
    finally:
        # Restore the original blocking mode whatever happens.
        for channel in modified_channels:
            channel.configureBlocking(1)
| gpl-3.0 |
Tecnativa/website | website_event_register_free/model/__init__.py | 30 | 1041 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import ir_ui_view
from . import event_registration
| agpl-3.0 |
charbeljc/OCB | addons/account/project/wizard/account_analytic_journal_report.py | 378 | 3164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_journal_report(osv.osv_memory):
    """Wizard that prints a report over a date period for one or more
    analytic journals (either the active record or the journals selected
    on the wizard form)."""
    _name = 'account.analytic.journal.report'
    _description = 'Account Analytic Journal'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
        'analytic_account_journal_id': fields.many2many('account.analytic.journal', 'account_analytic_journal_name', 'journal_line_id', 'journal_print_id', 'Analytic Journals', required=True),
    }
    _defaults = {
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }

    def check_report(self, cr, uid, ids, context=None):
        """Launch the analytic journal report action.

        Journal ids come from ``context['active_id']`` when set, otherwise
        from the many2many selection on the wizard record.
        """
        if context is None:
            context = {}
        # Pass the context so language/company-dependent values are read
        # consistently with the rest of the request.
        data = self.read(cr, uid, ids, context=context)[0]
        ids_list = []
        if context.get('active_id', False):
            ids_list.append(context.get('active_id', False))
        else:
            record = self.browse(cr, uid, ids[0], context=context)
            for analytic_record in record.analytic_account_journal_id:
                ids_list.append(analytic_record.id)
        datas = {
            'ids': ids_list,
            'model': 'account.analytic.journal',
            'form': data
        }
        context2 = context.copy()
        context2['active_model'] = 'account.analytic.journal'
        context2['active_ids'] = ids_list
        return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticjournal', data=datas, context=context2)

    def default_get(self, cr, uid, fields, context=None):
        """Default the journal selection to the active_ids, or to every
        analytic journal when no active_ids are in the context."""
        if context is None:
            context = {}
        res = super(account_analytic_journal_report, self).default_get(cr, uid, fields, context=context)
        # dict.has_key() is Python-2-only; the `in` operator is equivalent
        # and works on both Python 2 and 3.
        if 'active_ids' not in context:
            journal_ids = self.pool.get('account.analytic.journal').search(cr, uid, [], context=context)
        else:
            journal_ids = context.get('active_ids')
        if 'analytic_account_journal_id' in fields:
            res.update({'analytic_account_journal_id': journal_ids})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kylerbrown/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    """Sanity-check _jaccard on small hand-built boolean masks."""
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    # (other mask, expected Jaccard score against a1)
    cases = [(a1, 1), (a2, 0.25), (a3, 1.0 / 7), (a4, 0)]
    for other, expected in cases:
        assert_equal(_jaccard(a1, a1, other, other), expected)
def test_consensus_score():
    """Consensus score is 1 for matching partitions (in any order) and 0
    for disjoint ones."""
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    perfect = [((a, a), (a, a)), ((a, a), (b, b)),
               ((a, b), (a, b)), ((a, b), (b, a))]
    disjoint = [((a, a), (b, a)), ((a, a), (a, b)),
                ((b, b), (a, b)), ((b, b), (b, a))]
    for left, right in perfect:
        assert_equal(consensus_score(left, right), 1)
    for left, right in disjoint:
        assert_equal(consensus_score(left, right), 0)
def test_consensus_score_issue2445():
    """Different number of biclusters in A and B (regression, issue 2445)."""
    rows = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    cols = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    keep = [0, 2]
    score = consensus_score((rows, cols), (rows[keep], cols[keep]))
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(score, 2.0 / 3.0)
| bsd-3-clause |
dezynetechnologies/odoo | addons/gamification/__openerp__.py | 299 | 2464 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the Gamification addon: metadata,
# dependencies and the data files loaded at install time.
{
    'name': 'Gamification',
    'version': '1.0',
    'author': 'OpenERP SA',
    'category': 'Human Resources',
    'website' : 'https://www.odoo.com/page/gamification',
    'depends': ['mail', 'email_template', 'web_kanban_gauge'],
    'description': """
Gamification process
====================
The Gamification module provides ways to evaluate and motivate the users of OpenERP.
The users can be evaluated using goals and numerical objectives to reach.
**Goals** are assigned through **challenges** to evaluate and compare members of a team with each other and through time.
For non-numerical achievements, **badges** can be granted to users. From a simple "thank you" to an exceptional achievement, a badge is an easy way to express gratitude to a user for their good work.
Both goals and badges are flexible and can be adapted to a large range of modules and actions. When installed, this module creates easy goals to help new users to discover OpenERP and configure their user profile.
""",
    # XML/CSV data files loaded in order at module installation.
    'data': [
        'wizard/update_goal.xml',
        'wizard/grant_badge.xml',
        'views/badge.xml',
        'views/challenge.xml',
        'views/goal.xml',
        'data/cron.xml',
        'security/gamification_security.xml',
        'security/ir.model.access.csv',
        'data/goal_base.xml',
        'data/badge.xml',
        'views/gamification.xml',
    ],
    'application': True,
    'auto_install': False,
    'qweb': ['static/src/xml/gamification.xml'],
}
| agpl-3.0 |
harshadyeola/easyengine | tests/cli/test_stack_remove.py | 9 | 2304 | from ee.utils import test
from ee.cli.main import get_test_app
class CliTestCaseStack(test.EETestCase):
    """CLI-level tests for `ee stack remove`, one per removable component.

    Note: several methods were previously named ``..._install_...`` even
    though they exercise ``stack remove``; they are renamed for accuracy,
    and the repeated app lifecycle is factored into a helper.
    """

    def _run_stack_remove(self, flag):
        """Build and run the test app for `ee stack remove <flag>`."""
        self.app = get_test_app(argv=['stack', 'remove', flag])
        self.app.setup()
        self.app.run()
        self.app.close()

    def test_ee_cli(self):
        # Smoke test: the default app lifecycle runs with no arguments.
        self.app.setup()
        self.app.run()
        self.app.close()

    def test_ee_cli_stack_remove_web(self):
        self._run_stack_remove('--web')

    def test_ee_cli_stack_remove_admin(self):
        self._run_stack_remove('--admin')

    def test_ee_cli_stack_remove_mail(self):
        self._run_stack_remove('--mail')

    def test_ee_cli_stack_remove_nginx(self):
        self._run_stack_remove('--nginx')

    def test_ee_cli_stack_remove_php(self):
        self._run_stack_remove('--php')

    def test_ee_cli_stack_remove_mysql(self):
        self._run_stack_remove('--mysql')

    def test_ee_cli_stack_remove_postfix(self):
        self._run_stack_remove('--postfix')

    def test_ee_cli_stack_remove_wpcli(self):
        self._run_stack_remove('--wpcli')

    def test_ee_cli_stack_remove_phpmyadmin(self):
        self._run_stack_remove('--phpmyadmin')

    def test_ee_cli_stack_remove_adminer(self):
        self._run_stack_remove('--adminer')

    def test_ee_cli_stack_remove_utils(self):
        self._run_stack_remove('--utils')
| mit |
keedio/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/shared/exc.py | 118 | 2259 | # file openpyxl/shared/exc.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Definitions for openpyxl shared exception classes."""
# --- Cell reference errors -------------------------------------------------
class CellCoordinatesException(Exception):
    """Error for converting between numeric and A1-style cell references."""
class ColumnStringIndexException(Exception):
    """Error for bad column names in A1-style cell references."""
class DataTypeException(Exception):
    """Error for any data type inconsistencies."""
# --- Workbook structure errors ---------------------------------------------
class NamedRangeException(Exception):
    """Error for badly formatted named ranges."""
class SheetTitleException(Exception):
    """Error for bad sheet names."""
class InsufficientCoordinatesException(Exception):
    """Error for partially specified cell coordinates."""
# --- File / mode errors ----------------------------------------------------
class OpenModeError(Exception):
    """Error for fileobj opened in non-binary mode."""
class InvalidFileException(Exception):
    """Error for trying to open a non-ooxml file."""
class ReadOnlyWorkbookException(Exception):
    """Error for trying to modify a read-only workbook"""
# --- Style errors ----------------------------------------------------------
class MissingNumberFormat(Exception):
    """Error when a referenced number format is not in the stylesheet"""
| apache-2.0 |
code4futuredotorg/reeborg_tw | src/libraries/brython/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that buffers parse events for pull-style use.

    Events are stored as a singly linked list of cells
    ``[(event_type, node), next_cell]`` anchored at ``self.firstEvent``;
    ``self.lastEvent`` points at the tail so appends are O(1).
    DOM nodes are created with ``self.documentFactory`` once the document
    element is seen (see buildDocument); events that arrive before then
    (comments, PIs) are held in ``self.pending_events``.
    """
    _locator = None
    document = None
    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Linked-list sentinel: firstEvent[1] is the first real event cell.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        # Stack of open elements; push/pop are bound methods for speed.
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self.pending_events = []
    def pop(self):
        # Fallback pop used only if binding list.pop above failed.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result
    def setDocumentLocator(self, locator):
        self._locator = locator
    def startPrefixMapping(self, prefix, uri):
        # Collect xmlns declarations so startElementNS can re-attach them
        # to the element as attributes.
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None
    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # Namespace declaration: re-emit as an xmlns attribute.
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        # Append the event cell and advance the tail pointer.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)
        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet: queue until buildDocument runs.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)
    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet: queue until buildDocument runs.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]
    def startDocument(self):
        # Note: no START_DOCUMENT event is emitted here; it is emitted by
        # buildDocument once the document element is known.
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation
    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        # Return the document element created by createDocument.
        return node.firstChild
    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()
    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Default SAX error handler: warnings are printed and parsing
    continues; errors and fatal errors are re-raised, aborting the parse."""
    def warning(self, exception):
        # Non-fatal: report and keep going.
        print(exception)
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Iterator of (event_type, node) pairs produced by parsing `stream`.

    Uses the parser's IncrementalParser interface (feed/close) when
    available, pulling events on demand; otherwise falls back to parsing
    the whole stream eagerly (_slurp) and replaying the buffered events
    (_emit).
    """
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Parser is not incremental: swap in the eager fallback.
            self.getEvent = self._slurp
        self.reset()
    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
    def __getitem__(self, pos):
        # Legacy sequence-style iteration support.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError
    def __next__(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration
    def __iter__(self):
        return self
    def expandNode(self, node):
        """Consume events and build the full subtree under `node` in place."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                # Reached the END_ELEMENT for `node` itself: subtree done.
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()
    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def _slurp(self):
        """ Fallback replacement for getEvent() using the
            standard SAX2 interface, which means we slurp the
            SAX events into memory (no performance gain, but
            we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()
    def _emit(self):
        """ Fallback replacement for getEvent() that emits
            the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that also wires every created node into the DOM
    tree as the events arrive, so a complete document is built."""

    def _attach_to_current(self, node):
        # Append `node` under the element on top of the open-element stack.
        self.elementStack[-1].appendChild(node)

    def startElementNS(self, name, tagName, attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        # The new element is on top of the stack; its parent sits below it.
        self.elementStack[-2].appendChild(self.elementStack[-1])

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        self.elementStack[-2].appendChild(self.elementStack[-1])

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        self._attach_to_current(self.lastEvent[0][1])

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        self._attach_to_current(self.lastEvent[0][1])

    def characters(self, chars):
        PullDOM.characters(self, chars)
        self._attach_to_current(self.lastEvent[0][1])
# Default chunk size for incremental reads in parse(): just under 16 KiB.
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over `stream_or_string`.

    Accepts either an open file object or a filename (which is opened in
    binary mode).  A SAX parser is created when none is supplied.
    """
    if bufsize is None:
        bufsize = default_bufsize
    if isinstance(stream_or_string, str):
        # Treat a string argument as a filename.
        stream = open(stream_or_string, 'rb')
    else:
        stream = stream_or_string
    if parser is None or not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
    """Return a DOMEventStream over the XML document in `string`."""
    from io import StringIO
    if not parser:
        parser = xml.sax.make_parser()
    # Buffer size equals the whole string, so one feed() delivers it all.
    return DOMEventStream(StringIO(string), parser, len(string))
| agpl-3.0 |
Silmathoron/nest-simulator | pynest/nest/tests/test_parameter.py | 20 | 3163 | # -*- coding: utf-8 -*-
#
# test_parameter.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Simple Parameter tests
"""
import nest
import unittest
class TestParameter(unittest.TestCase):
    """Arithmetic on NEST random Parameters.

    Each test combines a uniform Parameter with a scalar via +, -, *, /
    (on either side) and checks that a sampled value stays strictly
    inside the correspondingly shifted/scaled interval.
    """
    def setUp(self):
        # Start every test from a clean simulator state.
        nest.ResetKernel()
    def test_add(self):
        low = 55.
        high = 75.
        val = 33.
        e = nest.random.uniform(low, high) + val
        self.assertGreater(e.GetValue(), low + val)
        self.assertLess(e.GetValue(), high + val)
    def test_radd(self):
        # Scalar on the left exercises __radd__.
        low = 55.
        high = 75.
        val = 33.
        e = val + nest.random.uniform(low, high)
        self.assertGreater(e.GetValue(), low + val)
        self.assertLess(e.GetValue(), high + val)
    def test_sub(self):
        low = 55.
        high = 75.
        val = 33.
        e = nest.random.uniform(low, high) - val
        self.assertGreater(e.GetValue(), low - val)
        self.assertLess(e.GetValue(), high - val)
    def test_rsub(self):
        # Scalar on the left exercises __rsub__; interval flips.
        low = 55.
        high = 75.
        val = 33.
        e = val - nest.random.uniform(low, high)
        self.assertGreater(e.GetValue(), val - high)
        self.assertLess(e.GetValue(), val - low)
    def test_neg(self):
        low = 55.
        high = 75.
        e = -nest.random.uniform(low, high)
        self.assertGreater(e.GetValue(), -high)
        self.assertLess(e.GetValue(), -low)
    def test_rsub_all_neg(self):
        # Same as test_rsub, but with an all-negative interval.
        low = -55.
        high = -75.
        val = -33.
        e = val - nest.random.uniform(high, low)
        self.assertGreater(e.GetValue(), val - low)
        self.assertLess(e.GetValue(), val - high)
    def test_mul(self):
        low = 55.
        high = 75.
        val = 3.
        e = nest.random.uniform(low, high) * val
        self.assertGreater(e.GetValue(), low * val)
        self.assertLess(e.GetValue(), high * val)
    def test_rmul(self):
        low = 55.
        high = 75.
        val = 3.
        e = val * nest.random.uniform(low, high)
        self.assertGreater(e.GetValue(), val * low)
        self.assertLess(e.GetValue(), val * high)
    def test_div(self):
        low = 55.
        high = 75.
        val = 3.
        e = nest.random.uniform(low, high) / val
        self.assertGreater(e.GetValue(), low / val)
        self.assertLess(e.GetValue(), high / val)
def suite():
    """Collect all TestParameter test methods into a test suite."""
    # unittest.makeSuite() is deprecated (and removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase is the supported equivalent and
    # uses the same default 'test' method prefix.
    return unittest.TestLoader().loadTestsFromTestCase(TestParameter)
def run():
    """Execute the parameter test suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
ptisserand/ansible | lib/ansible/modules/system/lvol.py | 31 | 19498 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Jeroen Hoekx (@jhoekx)
- Alexander Bulimov (@abulimov)
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
lv:
description:
- The name of the logical volume.
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
choices: [ absent, present ]
default: present
active:
description:
- Whether the volume is activate and visible to the host.
type: bool
default: 'yes'
version_added: "2.2"
force:
description:
      - Shrink or remove operations of volumes require this switch. Ensures that
        filesystems never get corrupted/destroyed by mistake.
type: bool
default: 'no'
version_added: "1.5"
opts:
description:
- Free-form options to be passed to the lvcreate command.
version_added: "2.0"
snapshot:
description:
- The name of the snapshot volume
version_added: "2.1"
pvs:
description:
- Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
version_added: "2.2"
thinpool:
description:
- The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
version_added: "2.5"
shrink:
description:
- Shrink if current size is higher than size requested.
type: bool
default: 'yes'
version_added: "2.2"
resizefs:
description:
- Resize the underlying filesystem together with the logical volume.
type: bool
    default: 'no'
version_added: "2.5"
notes:
- You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
'''
EXAMPLES = '''
- name: Create a logical volume of 512m
lvol:
vg: firefly
lv: test
size: 512
- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
lvol:
vg: firefly
lv: test
size: 512
pvs: /dev/sda,/dev/sdb
- name: Create cache pool logical volume
lvol:
vg: firefly
lv: lvcache
size: 512m
opts: --type cache-pool
- name: Create a logical volume of 512g.
lvol:
vg: firefly
lv: test
size: 512g
- name: Create a logical volume the size of all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: 100%FREE
- name: Create a logical volume with special options
lvol:
vg: firefly
lv: test
size: 512g
opts: -r 16
- name: Extend the logical volume to 1024m.
lvol:
vg: firefly
lv: test
size: 1024
- name: Extend the logical volume to consume all remaining space in the volume group
lvol:
vg: firefly
lv: test
size: +100%FREE
- name: Extend the logical volume to take all remaining space of the PVs
lvol:
vg: firefly
lv: test
size: 100%PVS
resizefs: true
- name: Resize the logical volume to % of VG
lvol:
vg: firefly
lv: test
size: 80%VG
force: yes
- name: Reduce the logical volume to 512m
lvol:
vg: firefly
lv: test
size: 512
force: yes
- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
lvol:
vg: firefly
lv: test
size: 512
shrink: no
- name: Remove the logical volume.
lvol:
vg: firefly
lv: test
state: absent
force: yes
- name: Create a snapshot volume of the test logical volume.
lvol:
vg: firefly
lv: test
snapshot: snap1
size: 100m
- name: Deactivate a logical volume
lvol:
vg: firefly
lv: test
active: false
- name: Create a deactivated logical volume
lvol:
vg: firefly
lv: test
size: 512g
active: false
- name: Create a thin pool of 512g
lvol:
vg: firefly
thinpool: testpool
size: 512g
- name: Create a thin volume of 128g
lvol:
vg: firefly
lv: test
thinpool: testpool
size: 128g
'''
import re
from ansible.module_utils.basic import AnsibleModule
# Matches the leading integer part of an LVM size string such as "512.00m".
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
    """Pack (major, minor, patch) into one integer for easy comparison."""
    return int(major) * 1000000 + int(minor) * 1000 + int(patch)
def parse_lvs(data):
    """Parse semicolon-separated `lvs` report output into LV dicts.

    Expected fields per line: name;size;attr_flags.  Snapshot-origin
    brackets around the name are stripped; size keeps only its integer
    part; flags are decoded from the lv_attr string.
    """
    entries = []
    for raw_line in data.splitlines():
        fields = raw_line.strip().split(';')
        attr_flags = fields[2]
        entries.append({
            'name': fields[0].replace('[', '').replace(']', ''),
            'size': int(re.match(r"(\d+)", fields[1]).group(1)),
            # lv_attr position 5 == 'a' means the LV is active.
            'active': attr_flags[4] == 'a',
            # lv_attr position 1: 't' = thin pool, 'V' = thin volume.
            'thinpool': attr_flags[0] == 't',
            'thinvol': attr_flags[0] == 'V',
        })
    return entries
def parse_vgs(data):
    """Parse semicolon-separated `vgs` report output into VG dicts.

    Expected fields per line: name;size;free;extent_size.  Numeric
    fields keep only their integer part (unit suffix dropped).
    """
    entries = []
    for raw_line in data.splitlines():
        fields = raw_line.strip().split(';')
        entries.append({
            'name': fields[0],
            'size': int(re.match(r"(\d+)", fields[1]).group(1)),
            'free': int(re.match(r"(\d+)", fields[2]).group(1)),
            'ext_size': int(re.match(r"(\d+)", fields[3]).group(1)),
        })
    return entries
def get_lvm_version(module):
    """Return the installed LVM version as one comparable int, or None.

    Runs `lvm version` and extracts the "LVM version: X.Y.Z" line.
    """
    lvm_bin = module.get_bin_path("lvm", required=True)
    rc, out, err = module.run_command("%s version" % (lvm_bin))
    if rc != 0:
        return None
    match = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
    if match is None:
        return None
    return mkversion(match.group(1), match.group(2), match.group(3))
def main():
    """Manage an LVM logical volume (or thin pool / thin volume / snapshot):
    create, resize, activate/deactivate or remove it according to the
    module parameters.  Always exits via module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str'),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
            shrink=dict(type='bool', default=True),
            active=dict(type='bool', default=True),
            snapshot=dict(type='str'),
            pvs=dict(type='str'),
            resizefs=dict(type='bool', default=False),
            thinpool=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=(
            ['lv', 'thinpool'],
        ),
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    resizefs = module.boolean(module.params['resizefs'])
    thinpool = module.params['thinpool']
    # size_opt 'L' selects absolute sizing (-L); 'l' selects extents/percentage (-l)
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # the lv* command line expects the PV list space-separated
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if '%' not in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                # size must be a plain (unsigned) number once the unit is stripped
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    if snapshot:
        # Check snapshot pre-conditions
        for test_lv in lvs:
            if test_lv['name'] == lv or test_lv['name'] == thinpool:
                if not test_lv['thinpool'] and not thinpool:
                    break
                else:
                    module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
        else:
            # for/else: loop finished without break, i.e. no matching origin LV
            module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
        check_lv = snapshot
    elif thinpool:
        if lv:
            # Check thin volume pre-conditions
            for test_lv in lvs:
                if test_lv['name'] == thinpool:
                    break
            else:
                module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
            check_lv = lv
        else:
            check_lv = thinpool
    else:
        check_lv = lv

    # Locate the LV to operate on; also accept "vg/lv"-qualified names
    for test_lv in lvs:
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    msg = ''
    if this_lv is None:
        if state == 'present':
            # Require size argument except for snapshot of thin volumes
            if (lv or thinpool) and not size:
                for test_lv in lvs:
                    if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
                        break
                else:
                    module.fail_json(msg="No size given.")

            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                # -s creates a snapshot of vg/lv named `snapshot`
                if size:
                    cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
                else:
                    cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
            elif thinpool and lv:
                # thin volume inside an existing pool: lvcreate -V ... -T vg/pool
                if size_opt == 'l':
                    module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
                size_opt = 'V'
                cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
            elif thinpool and not lv:
                # create the thin pool itself
                cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
            else:
                # plain logical volume
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # no size requested: nothing to resize, fall through to (de)activation
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            if '+' in size:
                # "+N%..." means grow by that amount relative to the current size
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                        (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
                    )
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally reconcile the activation state of an existing (or just
    # created/resized) LV; both branches exit the module themselves.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 |
cineuse/CNCGToolKit | pyLibs/PIL/DcxImagePlugin.py | 40 | 1844 | #
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications. Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl Created
# 1996-03-20 fl Properly derived from PcxImageFile.
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 2002-07-30 fl Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import Image
from PcxImagePlugin import PcxImageFile
MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?
def i32(c):
    """Decode the first four characters of *c* as a little-endian 32-bit int."""
    value = 0
    for shift, ch in zip((0, 8, 16, 24), c[:4]):
        value += ord(ch) << shift
    return value
def _accept(prefix):
    """Return true when the 4-byte prefix carries the DCX magic number."""
    return MAGIC == i32(prefix)
##
# Image plugin for the Intel DCX format.
class DcxImageFile(PcxImageFile):
    """Multi-frame DCX container: a directory of offsets followed by PCX
    frames.  Each frame is decoded by the PcxImageFile base class."""

    format = "DCX"
    format_description = "Intel DCX"

    def _open(self):
        # Header: 4-byte little-endian magic number
        s = self.fp.read(4)
        if i32(s) != MAGIC:
            # NOTE: Python 2-only raise syntax; this module targets old PIL/py2
            raise SyntaxError, "not a DCX file"

        # Component directory: up to 1024 4-byte offsets, zero-terminated
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        # keep the underlying file so seek() can rebind self.fp per frame
        self.__fp = self.fp
        self.seek(0)

    def seek(self, frame):
        """Select *frame* (0-based) and re-open it as a PCX image."""
        if frame >= len(self._offset):
            raise EOFError("attempt to seek outside DCX directory")
        self.frame = frame
        self.fp = self.__fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self):
        """Return the index of the currently selected frame."""
        return self.frame
Image.register_open("DCX", DcxImageFile, _accept)
Image.register_extension("DCX", ".dcx")
| mit |
sql-machine-learning/sqlflow | python/runtime/dbapi/hive.py | 1 | 3946 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
try:
from impala.dbapi import connect
except: # noqa E722
pass
from runtime.dbapi.connection import Connection, ResultSet
class HiveResultSet(ResultSet):
    """ResultSet over an impala/Hive DB-API cursor.

    Constructed with a live cursor on success, or with cursor=None and an
    error string on failure (see HiveConnection._get_result_set).
    """

    def __init__(self, cursor, err=None):
        super(HiveResultSet, self).__init__()
        self._cursor = cursor
        # column metadata is computed lazily from cursor.description
        self._column_info = None
        self._raw_column_info = None
        self._err = err

    def _fetch(self, fetch_size):
        """Fetch up to fetch_size rows from the underlying cursor."""
        return self._cursor.fetchmany(fetch_size)

    def _fill_column_info(self):
        # cursor.description yields (name, type, ...) tuples; Hive names
        # may be qualified like "table.column", so keep both forms
        columns = []
        raw_columns = []
        for desc in self._cursor.description:
            name = desc[0].split('.')[-1]
            columns.append((name, desc[1]))
            raw_columns.append((desc[0], desc[1]))
        self._column_info = columns
        self._raw_column_info = raw_columns

    def raw_column_info(self):
        """Like column_info() but with the original (qualified) column names."""
        if self._raw_column_info is None:
            self._fill_column_info()
        return self._raw_column_info

    def column_info(self):
        """Get the result column meta, type in the meta maybe DB-specific

        Returns:
            A list of column metas, like [(field_a, INT), (field_b, STRING)]
        """
        if self._column_info is None:
            self._fill_column_info()
        return self._column_info

    def success(self):
        """Return True if the query is success"""
        return self._cursor is not None

    def error(self):
        """Return the error message captured at construction, if any."""
        return self._err

    def close(self):
        """Close the ResultSet explicitly, release any
        resource incurred by this query"""
        if self._cursor:
            self._cursor.close()
            self._cursor = None
class HiveConnection(Connection):
    """Hive connection

    conn_uri: uri in format:
    hive://usr:pswd@hiveserver:10000/mydb?auth=PLAIN&session.mapred=mr

    All params starting with 'session.' will be treated as session
    configuration and passed to each cursor.
    """

    def __init__(self, conn_uri):
        super(HiveConnection, self).__init__(conn_uri)
        self.driver = "hive"
        # uripts is the parsed URI provided by the Connection base class
        self.params["database"] = self.uripts.path.strip("/")
        self._conn = connect(user=self.uripts.username,
                             password=self.uripts.password,
                             database=self.params["database"],
                             host=self.uripts.hostname,
                             port=self.uripts.port,
                             auth_mechanism=self.params.get("auth"))
        # collect "session.*" query params for per-cursor configuration
        self._session_cfg = dict([(k, v) for (k, v) in self.params.items()
                                  if k.startswith("session.")])

    def _get_result_set(self, statement):
        """Execute *statement* and wrap the cursor in a HiveResultSet;
        on failure return a HiveResultSet carrying the error string."""
        cursor = self._conn.cursor(user=self.uripts.username,
                                   configuration=self._session_cfg)
        try:
            # impyla rejects trailing semicolons, so strip them
            cursor.execute(statement.rstrip(";"))
            return HiveResultSet(cursor)
        except Exception as e:
            cursor.close()
            return HiveResultSet(None, str(e))

    def cursor(self):
        """Get a cursor on the connection

        We insist not to use the low level api like cursor.
        Instead, we can directly use query/exec
        """
        return self._conn.cursor()

    def commit(self):
        """Commit the current transaction on the underlying connection."""
        return self._conn.commit()

    def close(self):
        """Close the underlying connection (idempotent)."""
        if self._conn:
            self._conn.close()
            self._conn = None
| apache-2.0 |
kvar/ansible | lib/ansible/module_utils/network/iosxr/providers/cli/config/bgp/process.py | 38 | 3659 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.iosxr.providers.providers import register_provider
from ansible.module_utils.network.iosxr.providers.providers import CliProvider
from ansible.module_utils.network.iosxr.providers.cli.config.bgp.neighbors import Neighbors
from ansible.module_utils.network.iosxr.providers.cli.config.bgp.address_family import AddressFamily
REDISTRIBUTE_PROTOCOLS = frozenset(['ospf', 'ospfv3', 'eigrp', 'isis', 'static',
'connected', 'lisp', 'mobile', 'rip',
'subscriber'])
@register_provider('iosxr', 'iosxr_bgp')
class Provider(CliProvider):
    """CLI config generator for IOS-XR BGP ('router bgp <as>' context)."""

    def render(self, config=None):
        """Build the list of CLI commands that bring *config* (the device's
        current configuration text, may be None) in line with the requested
        module parameters."""
        commands = list()

        # Detect an already-configured BGP AS in the running config
        existing_as = None
        if config:
            match = re.search(r'router bgp (\d+)', config, re.M)
            if match:
                existing_as = match.group(1)

        operation = self.params['operation']

        context = None
        if self.params['config']:
            context = 'router bgp %s' % self.get_value('config.bgp_as')

        if operation == 'delete':
            if existing_as:
                commands.append('no router bgp %s' % existing_as)
            elif context:
                commands.append('no %s' % context)

        else:
            if operation == 'replace':
                if existing_as and int(existing_as) != self.get_value('config.bgp_as'):
                    # The negate command has to be committed before new BGP AS is used.
                    self.connection.edit_config('no router bgp %s' % existing_as)
                    config = None

            elif operation == 'override':
                if existing_as:
                    # The negate command has to be committed before new BGP AS is used.
                    self.connection.edit_config('no router bgp %s' % existing_as)
                config = None

            context_commands = list()

            # Dispatch each config key to its _render_<key> method, if defined
            for key, value in iteritems(self.get_value('config')):
                if value is not None:
                    meth = getattr(self, '_render_%s' % key, None)
                    if meth:
                        resp = meth(config)
                        if resp:
                            context_commands.extend(to_list(resp))

            if context and context_commands:
                commands.append(context)
                commands.extend(context_commands)
                commands.append('exit')
        return commands

    def _render_router_id(self, config=None):
        # emit only when the command is not already present in *config*
        cmd = 'bgp router-id %s' % self.get_value('config.router_id')
        if not config or cmd not in config:
            return cmd

    def _render_log_neighbor_changes(self, config=None):
        # True enables 'detail' logging, False disables; None emits nothing
        cmd = 'bgp log neighbor changes'
        log_neighbor_changes = self.get_value('config.log_neighbor_changes')
        if log_neighbor_changes is True:
            if not config or cmd not in config:
                return '%s detail' % cmd
        elif log_neighbor_changes is False:
            if config and cmd in config:
                return '%s disable' % cmd

    def _render_neighbors(self, config):
        """ generate bgp neighbor configuration
        """
        return Neighbors(self.params).render(config)

    def _render_address_family(self, config):
        """ generate address-family configuration
        """
        return AddressFamily(self.params).render(config)
| gpl-3.0 |
andrewfu0325/gem5-aladdin | configs/common/Caches.py | 1 | 3830 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.objects import *
# Base implementations of L1, L2, IO and TLB-walker caches. There are
# used in the regressions and also as base components in the
# system-configuration scripts. The values are meant to serve as a
# starting point, and specific parameters can be overridden in the
# specific instantiations.
class L1Cache(BaseCache):
    """Base L1 cache parameters; specific configs override size etc."""
    assoc = 2
    hit_latency = 1
    response_latency = 0
    mshrs = 16  # coretex-a15
    tgts_per_mshr = 20
    is_top_level = True
    is_perfect_cache = False
class L2Cache(BaseCache):
    """Base L2 cache parameters."""
    assoc = 8
    hit_latency = 12
    response_latency = 6
    mshrs = 20
    tgts_per_mshr = 12
    write_buffers = 8
    is_perfect_cache = False
class IOCache(BaseCache):
    """Small cache fronting I/O devices; does not forward snoops."""
    assoc = 8
    hit_latency = 50
    response_latency = 50
    mshrs = 20
    size = '1kB'
    tgts_per_mshr = 12
    forward_snoops = False
    is_top_level = True
    is_perfect_cache = False
class PageTableWalkerCache(BaseCache):
    """Small cache for TLB page-table walker accesses."""
    assoc = 2
    hit_latency = 2
    response_latency = 2
    mshrs = 10
    size = '1kB'
    tgts_per_mshr = 12
    forward_snoops = False
    is_top_level = True
    is_perfect_cache = False
class L1TaggedPrefetchCache(L1Cache):
    """L1 cache with a tagged prefetcher triggered on every access."""
    prefetch_on_access = 'true'
    prefetcher = TaggedPrefetcher(degree=8, latency=1)
class L2TaggedPrefetchCache(L2Cache):
    """L2 cache with a tagged prefetcher triggered on every access."""
    prefetch_on_access = 'true'
    prefetcher = TaggedPrefetcher(degree=8, latency=1)
class L1StridePrefetchCache(L1Cache):
    """L1 cache with a stride prefetcher triggered on every access."""
    prefetch_on_access = 'true'
    prefetcher = StridePrefetcher(degree=8, latency=1)
class L2StridePrefetchCache(L2Cache):
    """L2 cache with a stride prefetcher triggered on every access."""
    prefetch_on_access = 'true'
    prefetcher = StridePrefetcher(degree=8, latency=1)
| bsd-3-clause |
vitaly4uk/django | django/utils/six.py | 408 | 30194 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Per-interpreter aliases for the string/integer/class types and the
# platform's maximum container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    # Python 2: names below (basestring, long, unicode, types.ClassType)
    # only exist on py2; this branch is never executed on py3.
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            # len() overflows when Py_ssize_t is 32-bit
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Data-less descriptor that resolves its value on first access,
    caches it on the instance, then removes itself from the class."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        # _resolve() is supplied by subclasses (MovedModule/MovedAttribute)
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between py2 and py3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # default: the py3 module keeps the same name
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # resolve the real module on first attribute access and cache
        # the attribute on this wrapper
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose attributes are _LazyDescr instances resolved on demand."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # advertise the lazily provided attributes as well
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between py2 and py3 modules."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # default the py3 attribute name to the py2 one, then to *name*
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        # import the hosting module and fetch the attribute from it
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # maps fully-qualified names to module (or MovedModule) objects
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        """Register *mod* under one or more names relative to this package."""
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        """Look up a registered module by its name relative to this package."""
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP 302 finder: claim only the names we registered
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        # PEP 302 loader
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # unwrap lazy module references to the real module
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname) # eventually raises ImportError
        return None
    get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    # attributes are attached to this class from _moved_attributes below
    __path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""
    # attributes are attached to this class from the MovedAttribute list below
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


# Exception classes that moved from urllib/urllib2 into urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


# Names that moved from urllib/urllib2 into urllib.request on Python 3.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# Wrapper classes that moved from urllib into urllib.response.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# robotparser became urllib.robotparser on Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Each submodule is the lazy module instance registered above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Hide the ModuleType plumbing; only expose the five submodules.
        return ['parse', 'error', 'request', 'response', 'robotparser']


_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # Attach the MovedAttribute/MovedModule to the lazy class; it resolves
    # to the real object on first attribute access.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The name may already have been resolved and cached on the module
        # instance rather than the class; try removing it from there.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# The introspection attributes of functions and methods were renamed between
# Python 2 and 3; record the correct spelling for the running interpreter so
# the operator.attrgetter helpers below can use them.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no next() builtin; fall back to the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed the callable() builtin; emulate it by checking
    # for a __call__ slot anywhere in the type's MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is stored directly.
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        # Extract the underlying function from a Python 2 unbound method.
        return unbound.im_func

    def create_bound_method(func, obj):
        # Python 2's MethodType needs the class as the third argument.
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):
        # Maps the Python 2 .next() protocol onto the Python 3 __next__
        # spelling, so subclasses only have to define __next__.

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors built on the attribute names recorded above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Dictionary iteration helpers: Python 3 views vs. Python 2 iter*/view*
# methods, unified under one spelling.  The **kw pass-through exists for
# dict-like objects (e.g. Django QueryDict) whose methods take arguments.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-sequence primitives, plus the
# version-appropriate StringIO/BytesIO and unittest assertion method names.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    # This branch only runs under Python 3, so checking the minor version
    # alone distinguishes 3.0/3.1 (no int.to_bytes) from 3.2+.
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    _assertCountEqual = "assertCountEqual"
    _assertRaisesRegex = "assertRaisesRegex"
    _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    # Dispatch to the version-appropriate unittest method name recorded above.
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
    # On Python 3 exec is an ordinary function; fetch it so exec_ has the
    # same calling convention on both major versions.
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals, mirroring the
            # behaviour of the bare exec statement.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # The three-argument raise statement is a syntax error on Python 3, so
    # it must be hidden inside an exec'd string.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
# "raise ... from ..." is a syntax error on Python 2, so the Python 3
# definitions are compiled from strings.  3.2 needs an explicit None check;
# 3.3+ accepts "raise value from None" directly.
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        # Python 2 has no exception chaining; just raise the new value.
        raise value
# Prefer the real print function (Python 2.6+ / 3.x); only synthesize one
# for interpreters that lack it entirely.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any positional argument is unicode, the separators and newline
        # must be unicode too, so the joined output stays unicode.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    _print = print_

    def print_(*args, **kwargs):
        # print() gained the flush parameter in 3.3; emulate it by wrapping
        # the underlying implementation.
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")

if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            # functools.wraps only sets __wrapped__ from 3.4 on; add it
            # ourselves so introspection tools can find the original.
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class whose metaclass replaces itself with *meta*
    during one level of class instantiation, so subclassing the result
    builds the real class directly with ``meta(name, bases, d)``.
    """
    class _TemporaryMeta(meta):

        def __new__(mcs, name, this_bases, d):
            # Ignore this_bases (which contains the temporary class) and
            # build the final class from the bases captured in the closure.
            return meta(name, bases, d)

    return type.__new__(_TemporaryMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the class from its own namespace, dropping descriptors
        # that the class machinery will recreate for the new class.
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            names = [slots] if isinstance(slots, str) else slots
            for name in names:
                body.pop(name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # Under Python 2, __str__ must return bytes: reroute the class's
        # text-returning __str__ to __unicode__ and encode it as UTF-8.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable

# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###

# Expose a memoryview-compatible type and the tuple of binary buffer types
# for each platform/version combination.
if PY3:
    memoryview = memoryview
    buffer_types = (bytes, bytearray, memoryview)
else:
    # memoryview and buffer are not strictly equivalent, but should be fine for
    # django core usage (mainly BinaryField). However, Jython doesn't support
    # buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
    if sys.platform.startswith('java'):
        memoryview = memoryview
    else:
        memoryview = buffer
    buffer_types = (bytearray, memoryview)
| bsd-3-clause |
Mohamed711/Quiz-Program | vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/v8/tools/testrunner/objects/peer.py | 123 | 3561 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Peer(object):
    """A remote machine participating in distributed test execution."""

    def __init__(self, address, jobs, rel_perf, pubkey):
        self.address = address                 # string: IP address
        self.jobs = jobs                       # integer: number of CPUs
        self.relative_performance = rel_perf
        self.pubkey = pubkey                   # string: pubkey's fingerprint
        self.shells = set()                    # set of strings
        self.needed_work = 0
        self.assigned_work = 0
        self.tests = []                        # list of TestCase objects
        self.trusting_me = False               # This peer trusts my public key.
        self.trusted = False                   # I trust this peer's public key.

    def __str__(self):
        return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
                (self.address, self.jobs, self.relative_performance,
                 self.trusting_me, self.trusted))

    def _accept(self, test, shell):
        # Shared bookkeeping for taking one test from |shell|.
        self.needed_work -= test.duration
        self.assigned_work += test.duration
        shell.total_duration -= test.duration
        self.tests.append(test)

    def AddTests(self, shell):
        """Adds tests from |shell| to this peer.

        Stops when self.needed_work reaches zero, or when all of shell's tests
        are assigned."""
        assert self.needed_work > 0
        if shell.shell not in self.shells:
            self.shells.add(shell.shell)
        while shell.tests and self.needed_work > 0:
            self._accept(shell.tests.pop(), shell)

    def ForceAddOneTest(self, test, shell):
        """Forcibly adds another test to this peer, disregarding needed_work."""
        if shell.shell not in self.shells:
            self.shells.add(shell.shell)
        self._accept(test, shell)

    def Pack(self):
        """Creates a JSON serializable representation of this Peer."""
        return [self.address, self.jobs, self.relative_performance]

    @staticmethod
    def Unpack(packed):
        """Creates a Peer object built from a packed representation."""
        pubkey_dummy = ""  # Callers of this don't care (only the server does).
        return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
| cc0-1.0 |
tensorflow/tensorflow | tensorflow/python/ops/rnn_grad_test.py | 17 | 5334 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gradients of (block) LSTM/GRU operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_rnn_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class RNNGradTest(test.TestCase):
  """Tests for gradients of the fused (block) LSTM ops."""

  @test_util.deprecated_graph_mode_only
  def testBlockLSTMV1V2Consistency(self):
    """BlockLSTM (ICFO gate layout) and BlockLSTMV2 (IFCO) must agree.

    Runs both ops on the same inputs -- with V1's forget_bias/cell_clip
    disabled to match V2 defaults -- and checks that outputs and the w/b
    gradients are identical once the weights are permuted between layouts.
    """
    num_steps = 1
    batch_size = 1
    input_size = 1
    hidden_size = 8
    w = deterministic_random_uniform(
        [input_size + hidden_size, 4 * hidden_size])
    b = deterministic_random_uniform([4 * hidden_size])
    x = deterministic_random_uniform([num_steps, batch_size, input_size])
    cs_prev = h_prev = deterministic_random_uniform([batch_size, hidden_size])

    all_cs, all_h = self._lstm_block(
        functools.partial(
            gen_rnn_ops.BlockLSTM,
            forget_bias=0.0,  # Disable to match V2 default.
            cell_clip=0.0),  # Disable to match V2 default.
        w, b, x, cs_prev, h_prev)
    w_grad, b_grad = gradients.gradients(all_cs + all_h, [w, b])

    # Same computation with the weights/biases permuted into IFCO order.
    w_ifco, b_ifco = icfo_to_ifco(w, b)
    all_cs_ifco, all_h_ifco = self._lstm_block(
        gen_rnn_ops.BlockLSTMV2, w_ifco, b_ifco, x, cs_prev, h_prev)
    w_ifco_grad, b_ifco_grad = gradients.gradients(
        all_cs_ifco + all_h_ifco, [w_ifco, b_ifco])

    self.assertAllEqual(all_cs, all_cs_ifco)
    self.assertAllEqual(all_h, all_h_ifco)
    self.assertAllEqual(w_grad, w_ifco_grad)
    self.assertAllEqual(b_grad, b_ifco_grad)

  @test_util.deprecated_graph_mode_only
  def testLSTMBlockCell(self):
    """Forward and backward of lstm_block_cell must match CPU vs. GPU."""
    batch_size = np.random.randint(1, 32)
    input_size = np.random.randint(1, 32)
    hidden_size = np.random.randint(1, 32)
    w = deterministic_random_uniform(
        [input_size + hidden_size, 4 * hidden_size])
    b = deterministic_random_uniform([4 * hidden_size])
    x = deterministic_random_uniform([batch_size, input_size])
    cs_prev = h_prev = deterministic_random_uniform([batch_size, hidden_size])
    # Peepholes are disabled, so the peephole weights are all-zero dummies.
    w_peephole = array_ops.zeros(cs_prev.shape[1:], dtype=w.dtype)
    cs_grad = deterministic_random_uniform([batch_size, hidden_size])
    h_grad = deterministic_random_uniform([batch_size, hidden_size])

    outputs = []
    grads = []
    # Run the identical graph on CPU and GPU and compare the results.
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        output = gen_rnn_ops.lstm_block_cell(
            x=x,
            cs_prev=cs_prev,
            h_prev=h_prev,
            w=w,
            wci=w_peephole,
            wcf=w_peephole,
            wco=w_peephole,
            b=b,
            forget_bias=1.0,
            cell_clip=0.0,
            use_peephole=False)
        (i, cs, f, o, ci, co, _) = output
        grad = gen_rnn_ops.lstm_block_cell_grad(
            x=x,
            cs_prev=cs_prev,
            h_prev=h_prev,
            w=w,
            wci=w_peephole,
            wcf=w_peephole,
            wco=w_peephole,
            b=b,
            i=i,
            cs=cs,
            f=f,
            o=o,
            ci=ci,
            co=co,
            cs_grad=cs_grad,
            h_grad=h_grad,
            use_peephole=False)
        outputs.append(output)
        grads.append(grad)
    self.assertAllClose(outputs[0], outputs[1])
    self.assertAllClose(grads[0], grads[1])

  def _lstm_block(self, op, w, b, x, cs_prev, h_prev):
    # Runs the fused LSTM op over the whole sequence (no peepholes) and
    # returns the per-step cell states and hidden states.
    w_peephole = array_ops.zeros(cs_prev.shape[1:], dtype=w.dtype)
    _, all_cs, _, _, _, _, all_h = op(
        seq_len_max=math_ops.cast(array_ops.shape(x)[0], dtypes.int64),
        x=x,
        cs_prev=cs_prev,
        h_prev=h_prev,
        w=w,
        wci=w_peephole,
        wcf=w_peephole,
        wco=w_peephole,
        b=b,
        use_peephole=False)
    return all_cs, all_h
def deterministic_random_uniform(shape):
  # NOTE(review): despite the name, this samples the global NumPy RNG with
  # no explicit seed here; determinism presumably relies on the test harness
  # seeding np.random -- TODO confirm.
  return ops.convert_to_tensor(np.random.random(shape), dtype=dtypes.float32)
def icfo_to_ifco(w, b):
  """Convert gates' weights and biases from ICFO to IFCO layout."""
  # Split the fused gate dimension into the four LSTM gates in ICFO order
  # (input, cell, forget, output)...
  w_i, w_c, w_f, w_o = array_ops.split(w, num_or_size_splits=4, axis=1)
  b_i, b_c, b_f, b_o = array_ops.split(b, num_or_size_splits=4)
  # ...and re-concatenate them in IFCO order (input, forget, cell, output).
  w_ifco = array_ops.concat([w_i, w_f, w_c, w_o], axis=1)
  b_ifco = array_ops.concat([b_i, b_f, b_c, b_o], axis=0)
  return w_ifco, b_ifco


if __name__ == "__main__":
  test.main()
| apache-2.0 |
ujenmr/ansible | test/integration/targets/aws_lambda/files/mini_lambda.py | 139 | 1237 | from __future__ import print_function
import json
import os
def handler(event, context):
    """
    The handler function is the function which gets called each time
    the lambda is run.
    """
    # Printing goes to the CloudWatch log, the simplest way to debug the
    # lambda if we can find the log entry.
    print("got event:\n" + json.dumps(event))

    # A missing "name" key raises KeyError, which surfaces as an
    # Amazon-chosen failure from the lambda -- acceptable here.
    name = event["name"]

    # Environment variables act as configuration, so behaviour can change
    # without uploading a new lambda package.
    extra = os.environ.get("EXTRA_MESSAGE")
    greeting = "hello {0}. {1}".format(name, extra) if extra else "hello " + name

    return {"message": greeting}
def main():
    """
    This main function will normally never be called during normal
    lambda use. It is here for testing the lambda program only.
    """
    event = {"name": "james"}
    context = None  # The lambda context is unused by handler().
    print(handler(event, context))


if __name__ == '__main__':
    main()
| gpl-3.0 |
sanyaade-webdev/media-service-demo | src/msd/msd_utils.py | 2 | 1274 | # media-service-demo
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Mark Ryan <mark.d.ryan@intel.com>
#
import tempfile
import pygtk
pygtk.require('2.0')
import gtk
import urllib2
import os
def image_from_file(url):
    # Download *url* into a temporary file, load it into a gtk.Image and
    # return the image.  The temporary file is always removed, even when
    # the download or image loading fails (the exception then propagates,
    # so callers should be prepared for urllib2 errors).
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    tmpFileName = tmpfile.name
    image = None
    try:
        with tmpfile:
            # 15-second timeout for the HTTP fetch.
            message = urllib2.urlopen(url, None, 15)
            tmpfile.write(message.read())
        image = gtk.Image()
        image.set_from_file(tmpfile.name)
    finally:
        os.unlink(tmpFileName)
    return image
| lgpl-2.1 |
franek/weboob | weboob/tools/date.py | 1 | 4493 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date, timedelta
try:
from dateutil import tz
except ImportError:
raise ImportError('Please install python-dateutil')
__all__ = ['local2utc', 'utc2local', 'LinearDateGuesser']
def local2utc(dateobj):
    """Stamp *dateobj* as local time and convert it to UTC."""
    return dateobj.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
def utc2local(dateobj):
    """Stamp *dateobj* as UTC and convert it to local time."""
    return dateobj.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
class LinearDateGuesser(object):
    """
    The aim of this class is to guess the exact date object from
    a day and a month, but not a year.

    It works with a start date (default is today), and all dates must be
    sorted from recent to older.
    """

    def __init__(self, current_date=None, date_max_bump=timedelta(7)):
        # Dates up to ``date_max_bump`` in the future of the current date
        # are still accepted as belonging to the current year (see
        # guess_date for the rationale).
        self.date_max_bump = date_max_bump
        if current_date is None:
            current_date = date.today()
        self.current_date = current_date

    def try_assigning_year(self, day, month, start_year, max_year):
        """
        Tries to create a date object with day, month and start_year and returns
        it.

        If it fails due to the year not matching the day+month combination
        (i.e. due to a ValueError -- TypeError and OverflowError are not
        handled), the previous or next years are tried until max_year is
        reached.

        In case initialization still fails with max_year, this function raises
        a ValueError.
        """
        # Was py2-only: "while 1", "except ValueError, e" and cmp();
        # rewritten with syntax valid on both Python 2.6+ and Python 3.
        while True:
            try:
                return date(start_year, month, day)
            except ValueError:
                if start_year == max_year:
                    raise
                # Step one year towards max_year (works whether max_year
                # lies above or below start_year).
                start_year += 1 if max_year > start_year else -1

    def set_current_date(self, current_date):
        """Record *current_date* as the new reference point for guessing."""
        self.current_date = current_date

    def guess_date(self, day, month, change_current_date=True):
        """ Returns a date object built from a given day/month pair. """
        today = self.current_date

        # The website only provides dates using the 'DD/MM' string, so we have to
        # determine the most possible year by ourselves. This implies tracking
        # the current date.
        # However, we may also encounter "bumps" in the dates, e.g. "12/11,
        # 10/11, 10/11, 12/11, 09/11", so we have to be, well, quite tolerant,
        # by accepting dates in the near future (say, 7 days) of the current
        # date. (Please, kill me...)

        # We first try to keep the current year
        naively_parsed_date = self.try_assigning_year(day, month, today.year, today.year - 5)
        if naively_parsed_date.year != today.year:
            # we most likely hit a 29/02 leading to a change of year
            if change_current_date:
                self.set_current_date(naively_parsed_date)
            return naively_parsed_date

        if naively_parsed_date > today + self.date_max_bump:
            # if the date ends up too far in the future, consider it actually
            # belongs to the previous year
            parsed_date = date(today.year - 1, month, day)
            if change_current_date:
                self.set_current_date(parsed_date)
        elif naively_parsed_date > today:
            # if the date is in the near future (within date_max_bump),
            # consider it is a bump; do not keep it as current date though
            parsed_date = naively_parsed_date
        else:
            # if the date is in the past, as expected, simply keep it
            parsed_date = naively_parsed_date
            # and make it the new current date
            if change_current_date:
                self.set_current_date(parsed_date)
        return parsed_date
| agpl-3.0 |
knz/slcore | slc/tools/slc/mt/mtsparc/asmproc/opt.py | 2 | 10867 | import re
import common
from ..regdefs import regmagic
# Canonical register names $0..$30, preset (full) at thread entry.
# Was py2-only xrange(); range() yields an identical set on both versions.
_default_presets = set("$%d" % x for x in range(31))


def grouper(items):
    """
    Group function code together.

    Consumes (type, content, comment) triples and yields them back
    unchanged, except that everything between a ".type NAME, #function"
    directive and the matching ".size NAME, ..." directive is collected
    and re-emitted as a single ('fun', {'name', 'body', 'presets'}, '')
    item.  An unterminated group is flushed item by item.
    """
    presets = _default_presets.copy()
    presets.add("$l0")
    # NOTE(review): the same ``presets`` set object is shared by every
    # function yielded from one grouper() call; mutating it for one
    # function affects the others -- TODO confirm this is intended.

    state = 0      # 0 = outside a function, 1 = inside a .type/.size pair
    queue = []     # items collected since the opening .type directive
    for (type, content, comment) in items:
        if state == 0:
            if content.startswith('.type') and content.endswith("#function"):
                # ".type NAME, #function" -> extract NAME.
                name = content.split(None)[1].split(',')[0]
                state = 1
                queue.append((type, content, comment))
                continue
        else:
            queue.append((type, content, comment))
            # Keep accumulating until the ".size" directive for this name.
            if (not content.startswith('.size')) or content.split(None)[1].split(',')[0] != name:
                continue
            yield ('fun', {'name': name, 'body': queue, 'presets': presets}, '')
            queue = []
            state = 0
            continue

        # state 0, not a function start: flush any leftovers, then pass
        # the item through unchanged.
        for q in queue:
            yield q
        queue = []
        state = 0
        yield (type, content, comment)
# %rN / %t<class>N spelling (vname) vs. $<class>N spelling (canonical).
_re_vname = re.compile(r'%(r|t[dsgl])(f?\d+)')
_re_canon = re.compile(r'\$([dsgl]?f?\d+)')


def vnametocanon(fundata, items):
    """Rewrite %rN / %t<class>N register names to the canonical $... form."""
    def to_canon(m):
        # Drop the leading 'r'/'t' marker; keep the class letter (if any)
        # and the (possibly 'f'-prefixed) register number.
        return '$%s%s' % (m.group(1)[1:], m.group(2))

    for kind, content, comment in items:
        yield (kind,
               _re_vname.sub(to_canon, content),
               _re_vname.sub(to_canon, comment))


def canontovname(fundata, items):
    """Rewrite canonical $... register names back to the %r/%t... form."""
    def to_vname(m):
        body = m.group(1)
        # Class letters (d/s/g/l) mark thread registers ('t'); a bare
        # number is a raw register ('r').
        prefix = 't' if body[0] in "dsgl" else 'r'
        return '%%%s%s' % (prefix, body)

    for kind, content, comment in items:
        yield (kind,
               _re_canon.sub(to_vname, content),
               _re_canon.sub(to_vname, comment))
_re_regnum = re.compile('([0-9]+)')


def nextreg(reg):
    """Return *reg* with every embedded decimal number incremented by one."""
    def bump(m):
        return str(int(m.group(1)) + 1)
    return _re_regnum.sub(bump, reg)
# seereg looks at registers that are written by threads (locals, dependents)
_re_seereg = re.compile(r'\$[ld]?f?\d+')


def seereg(reg, str):
    """Return True iff register name *reg* occurs as a whole token in *str*."""
    # findall() yields maximal matches, so '$l3' does not match inside '$l31'.
    return reg in _re_seereg.findall(str)
_re_creargs = re.compile(r'.*MT: CREATE.*DRAIN\(([^)]*)\)')
def makedataflow(addswch, drainargs):
    """Build the long-latency dataflow analysis pass.

    addswch:   when True, "swch" annotations are inserted around uses of
               long-latency results (otherwise the pass only tracks them).
    drainargs: when True, registers named in a "MT: CREATE ... DRAIN(...)"
               comment are forced ("drained") before the create instruction.

    Returns a DetectDataflow instance, which the pipeline framework calls
    like a plain filter stage with (fundata, items).
    """
    class DetectDataflow(object):
        def __init__(self):
            # The pass advertises its name to the pipeline via __name__.
            if addswch:
                self.__name__ = "addswchll"
            else:
                self.__name__ = "dataflow"
        def __call__(self, fundata, items):
            """
            Add "swch" annotations after instructions that may consume
            the result of a long-latency operation.
            Additionally, as an optimization, insert a "swch" annotation before
            as well, to give some extra delay to the long-latency operations
            by forcing a cycle through the thread active queue.
            """
            allregs = regmagic.allregs
            swchbeforell = None
            if addswch:
                # NOTE(review): self.extra_options is not set in this class;
                # presumably injected by the pipeline framework -- confirm.
                swchbeforell = self.extra_options.get('swch-before-ll-use',True)
            locals_offset = regmagic.rd.mt_locals_offset
            swchenabled = True
            # On UT-LEON3, we can't switch on a branch/delayed instruction that
            # reads from a waiting register, in which case we must ensure that
            # every register for which there is an uncertainy is full before the
            # instruction is reached. This flag can be enabled from the command-line
            # with -fasmopt-flush-before-delayed.
            flushbeforedelayed = self.extra_options.get('flush-before-delayed',False)
            # initially exclude index
            # maybell: the set of registers whose value may still be pending
            # from a long-latency operation at this point in the stream.
            maybell = allregs.copy()
            for (type, content, comment) in items:
                #print (type,content[:3],comment)
                if type == 'label':
                    # Control may reach a label from unknown paths:
                    # conservatively assume every register may be pending again.
                    #print "FOO"
                    yield (type, content, comment)
                    maybell = allregs.copy()
                elif type == 'empty':
                    yield (type, content, comment)
                    if comment.startswith('MTREG_SET:'):
                        # Hint: the listed registers are known to be full here.
                        hint = set(comment.split(':',1)[1].strip().split(','))
                        maybell = maybell - hint
                    elif comment.startswith('MTREG_SPEC:'):
                        # Hint: exclude the listed registers from tracking
                        # entirely from this point on.
                        hint = set(comment.split(':',1)[1].strip().split(','))
                        maybell = maybell - hint
                        allregs = allregs - hint
                elif type == 'directive' and content in ['.swch', '.noswch']:
                    # Explicit directives toggle "swch" insertion on/off.
                    swchenabled = (content == '.swch')
                elif type == 'other' and content.metadata is not None:
                    #print "BAR %s :: %s ::" % (content, comment)
                    # Collect the registers read and written by this
                    # instruction; long-latency results go to longwrites.
                    reads = []
                    shortwrites = []
                    longwrites = []
                    md = content.metadata
                    ops = content.operands
                    ll = md.long_latency
                    for i in md.inputs:
                        op = ops[i]
                        reads.append(op)
                        if i in md.double_regs:
                            # A double register also reads its second half.
                            reads.append(nextreg(op))
                            #print "hello double",i,reads
                    for i in md.extra_inputs:
                        if i == 'y': continue
                        reads.append('$%d' % i)
                        reads.append('$l%d' % (i-locals_offset))
                    if ll:
                        outputs = longwrites
                    else:
                        outputs = shortwrites
                    for i in md.outputs:
                        op = ops[i]
                        outputs.append(op)
                        if i in md.double_regs:
                            outputs.append(nextreg(op))
                    for i in md.extra_outputs:
                        if i == 'y': continue
                        outputs.append('$%d' % i)
                        outputs.append('$l%d' % (i-locals_offset))
                    if drainargs and content.startswith('create'):
                        # Drain pending create arguments with "mov r,r"+"swch"
                        # so their values are forced before the create.
                        m = _re_creargs.match(comment)
                        if m is not None:
                            argregs = m.group(1).split(',')
                            removed = set()
                            for rx in maybell:
                                for r in argregs:
                                    if seereg(rx, r):
                                        yield('other', 'mov %s,%s' %(r,r), 'MT: drain create arg')
                                        yield('other', 'swch', '')
                                        #print "XXX", argregs
                                        removed.add(r)
                            maybell = maybell - removed
                    test = 0
                    # "q" is the set of touched registers -- those that
                    # hold a ready value after the current instruction.
                    q = set()
                    # if one of the "maybe regs" is read from,
                    # assume we are reading result from long latency.
                    #print "maybell = %r" % (maybell,)
                    #print "reads = %r" % (reads,)
                    for rx in maybell:
                        for r in reads:
                            if seereg(rx,r):
                                test = 1
                                q.add(rx)
                    if test == 1 and md.delayed and flushbeforedelayed:
                        # we can't suspend a delayed instruction,
                        # so we need to force all inputs beforehand
                        for r in q:
                            yield ('other', 'mov %s,%s' %(r,r), 'MT: force read before delayed')
                            yield ('other', 'swch', '')
                        test = 0
                    if test == 1 and addswch and swchbeforell and swchenabled:
                        yield ('other','swch','MT: before-ll: %s' % ','.join(q))
                    yield (type, content, comment)
                    if test == 1 and addswch and swchenabled:
                        yield ('other','swch','MT: after-ll: %s' % ','.join(q))
                    if content.metadata.is_branch:
                        # After a branch, conservatively assume anything
                        # may be pending again.
                        maybell = allregs.copy()
                    else:
                        # all the registers that were "short written"
                        # will not cause long latency during next use
                        for rx in maybell:
                            for r in shortwrites:
                                if seereg(rx, r):
                                    q.add(rx)
                        # remove touched registers from the long latency registers
                        maybell = maybell - q
                        # add generated long latency
                        for rx in allregs:
                            for r in longwrites:
                                if seereg(rx, r):
                                    maybell.add(rx)
                else:
                    # Everything else passes through unchanged.
                    yield (type, content, comment)
    return DetectDataflow()
def protectend(fundata, items):
    """
    Yield *items* unchanged, except that a "nop" is inserted in front of
    an "end" instruction whenever that "end" immediately follows a label
    or a delay slot (the start of the stream counts as a label).
    """
    follows_label = True
    for item in items:
        itype, content, comment = item
        if itype == 'label' or (itype == 'other' and 'DELAYSLOT' in comment):
            follows_label = True
        elif itype == 'other':
            if content == 'end' and follows_label:
                yield ('other', 'nop', 'MT: end after label/delayslot')
            # Any other real instruction clears the "just after label" state.
            follows_label = False
        yield item
from ...common.asmproc.remdefaults import *
from ...common.asmproc.labels import *
from ...common.asmproc.swch import *
from ...common.asmproc.end import *
from decode import decode
from delay import *
# Front end of the pipeline: read, tokenize and parse the input,
# then group the parsed lines into items.
_filter_begin = [common.reader,
                 common.lexer,
                 common.splitsemi,
                 common.parser,
                 grouper]
# Transformation stages applied, in order, between the front and back end.
_filter_stages = [
    vnametocanon,
    remdefaults,
    findlabels,
    killunusedlabels,
    decode,
    makedataflow(addswch = True, drainargs = False),
    rmdupswch,
    rmswchbegin,
    prunenopend,
    markdelay,
    protectend,
    canontovname,
    rmswchdir,
]
# Back end: flatten the item stream and print it out again.
_filter_end = [common.flattener,
               common.printer]
from ...common.asmproc import enhance
def filter(*args):
    """Run the complete assembly post-processing pipeline.

    Chains _filter_begin, _filter_stages and _filter_end through the
    enhance.filter driver; *args are forwarded to it unchanged.
    NOTE: intentionally shadows the builtin filter() -- kept for API
    compatibility with the other pipeline modules.
    """
    return enhance.filter(_filter_begin, _filter_stages, _filter_end, *args)
| gpl-3.0 |
jnewland/home-assistant | homeassistant/components/blink/alarm_control_panel.py | 7 | 2398 | """Support for Blink Alarm Control Panel."""
import logging
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.const import (
ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_DISARMED)
from . import BLINK_DATA, DEFAULT_ATTRIBUTION
_LOGGER = logging.getLogger(__name__)

# Material Design icon shown for the alarm panel in the frontend.
ICON = 'mdi:security'
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Blink Alarm Control Panels."""
    # Note: the docstring previously said "Arlo" -- a copy-paste slip from
    # the arlo integration; this platform is only set up via discovery
    # from the blink component.
    if discovery_info is None:
        return
    data = hass.data[BLINK_DATA]
    # One alarm panel entity per Blink sync module.
    sync_modules = [
        BlinkSyncModule(data, sync_name, sync_module)
        for sync_name, sync_module in data.sync.items()
    ]
    add_entities(sync_modules, True)
class BlinkSyncModule(AlarmControlPanel):
    """Alarm control panel entity backed by a Blink sync module."""

    def __init__(self, data, name, sync):
        """Initialize the alarm control panel."""
        self.data = data
        self.sync = sync
        self._name = name
        self._state = None

    @property
    def unique_id(self):
        """Return the sync module serial number as the unique id."""
        return self.sync.serial

    @property
    def icon(self):
        """Return the icon shown for this entity."""
        return ICON

    @property
    def state(self):
        """Return the current alarm state."""
        return self._state

    @property
    def name(self):
        """Return the name of the panel."""
        return "{} {}".format(BLINK_DATA, self._name)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # NOTE(review): this mutates self.sync.attributes in place --
        # confirm that is intended.
        attributes = self.sync.attributes
        attributes['network_info'] = self.data.networks
        attributes['associated_cameras'] = list(self.sync.cameras.keys())
        attributes[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION
        return attributes

    def update(self):
        """Refresh the Blink data and derive the armed/disarmed state."""
        _LOGGER.debug("Updating Blink Alarm Control Panel %s", self._name)
        self.data.refresh()
        self._state = STATE_ALARM_ARMED_AWAY if self.sync.arm else STATE_ALARM_DISARMED

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        self.sync.arm = False
        self.sync.refresh()

    def alarm_arm_away(self, code=None):
        """Send arm command."""
        self.sync.arm = True
        self.sync.refresh()
| apache-2.0 |
nesdis/djongo | tests/django_tests/tests/v22/tests/flatpages_tests/test_views.py | 130 | 6846 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin:
    """Create the default site plus four flatpages (two public, two
    requiring registration; one of each nested under /location/) shared
    by the flatpage view tests."""

    @classmethod
    def setUpTestData(cls):
        # don't use the manager because we want to ensure the site exists
        # with pk=1, regardless of whether or not it already exists.
        cls.site1 = Site(pk=1, domain='example.com', name='example.com')
        cls.site1.save()
        # (url, title, content, registration_required) for each page.
        page_specs = [
            ('/flatpage/', 'A Flatpage', "Isn't it flat!", False),
            ('/location/flatpage/', 'A Nested Flatpage', "Isn't it flat and deep!", False),
            ('/sekrit/', 'Sekrit Flatpage', "Isn't it sekrit!", True),
            ('/location/sekrit/', 'Sekrit Nested Flatpage', "Isn't it sekrit and deep!", True),
        ]
        pages = []
        for url, title, content, registration_required in page_specs:
            page = FlatPage.objects.create(
                url=url, title=title, content=content, enable_comments=False,
                template_name='', registration_required=registration_required,
            )
            page.sites.add(cls.site1)
            pages.append(page)
        cls.fp1, cls.fp2, cls.fp3, cls.fp4 = pages
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
    LOGIN_URL='/accounts/login/',
    MIDDLEWARE=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        # no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
    ],
    ROOT_URLCONF='flatpages_tests.urls',
    TEMPLATES=FLATPAGES_TEMPLATES,
    SITE_ID=1,
)
class FlatpageViewTests(TestDataMixin, TestCase):
    """Flatpages served through the flatpage view only -- the fallback
    middleware is deliberately left out of MIDDLEWARE above."""

    def test_view_flatpage(self):
        "A flatpage can be served through a view"
        response = self.client.get('/flatpage_root/flatpage/')
        self.assertContains(response, "<p>Isn't it flat!</p>")

    def test_view_non_existent_flatpage(self):
        """A nonexistent flatpage raises 404 when served through a view."""
        response = self.client.get('/flatpage_root/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        # Anonymous access redirects to LOGIN_URL; authenticated access works.
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
        user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.force_login(user)
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertContains(response, "<p>Isn't it sekrit!</p>")

    def test_fallback_flatpage(self):
        "A fallback flatpage won't be served if the middleware is disabled"
        response = self.client.get('/flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_fallback_non_existent_flatpage(self):
        """
        A nonexistent flatpage won't be served if the fallback middleware is
        disabled.
        """
        response = self.client.get('/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_view_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served through a view"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        response = self.client.get('/flatpage_root/some.very_special~chars-here/')
        self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
    APPEND_SLASH=True,
    LOGIN_URL='/accounts/login/',
    MIDDLEWARE=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        # no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
    ],
    ROOT_URLCONF='flatpages_tests.urls',
    TEMPLATES=FLATPAGES_TEMPLATES,
    SITE_ID=1,
)
class FlatpageViewAppendSlashTests(TestDataMixin, TestCase):
    """Same setup as FlatpageViewTests but with APPEND_SLASH=True:
    existing pages redirect to the slashed URL, missing ones 404."""

    def test_redirect_view_flatpage(self):
        "A flatpage can be served through a view and should add a slash"
        response = self.client.get('/flatpage_root/flatpage')
        self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)

    def test_redirect_view_non_existent_flatpage(self):
        """
        A nonexistent flatpage raises 404 when served through a view and
        should not add a slash.
        """
        response = self.client.get('/flatpage_root/no_such_flatpage')
        self.assertEqual(response.status_code, 404)

    def test_redirect_fallback_flatpage(self):
        "A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
        response = self.client.get('/flatpage')
        self.assertEqual(response.status_code, 404)

    def test_redirect_fallback_non_existent_flatpage(self):
        """
        A nonexistent flatpage won't be served if the fallback middleware is
        disabled and should not add a slash.
        """
        response = self.client.get('/no_such_flatpage')
        self.assertEqual(response.status_code, 404)

    def test_redirect_view_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served through a view and should add a slash"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        response = self.client.get('/flatpage_root/some.very_special~chars-here')
        self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)
| agpl-3.0 |
jokey2k/sentry | src/sentry/migrations/0113_auto__add_field_team_status.py | 36 | 24270 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the Team.status column
    (sentry_team.status, PositiveIntegerField, default 0)."""

    def forwards(self, orm):
        """Apply the migration: add the new column with its default."""
        # Adding field 'Team.status'
        db.add_column('sentry_team', 'status',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'Team.status'
        db.delete_column('sentry_team', 'status')

    # Frozen ORM state auto-generated by South at the time this migration
    # was created. Do not edit by hand.
    models = {
        'sentry.accessgroup': {
            'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        'sentry.activity': {
            'Meta': {'object_name': 'Activity'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
        },
        'sentry.alert': {
            'Meta': {'object_name': 'Alert'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.alertrelatedgroup': {
            'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
            'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
        },
        'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'sentry.eventmapping': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.groupcountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.groupseen': {
            'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
        },
        'sentry.grouptagkey': {
            'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.grouptagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        'sentry.project': {
            'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
        },
        'sentry.projectcountbyminute': {
            'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
            'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.tagkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.tagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.team': {
            'Meta': {'object_name': 'Team'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        }, 'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        }
    }

    complete_apps = ['sentry']
| bsd-3-clause |
benrudolph/commcare-hq | corehq/apps/hqadmin/system_info/checks.py | 2 | 3064 | from django.core import cache
from django.conf import settings
from django.utils.safestring import mark_safe
from restkit import Resource
import json
from corehq.apps.hqadmin.system_info.utils import human_bytes
from soil import heartbeat
def check_redis():
    """Report the status of the configured redis cache backend.

    Returns a dict with two keys:
      * ``redis_status``  -- "Online", "Offline", or "Not Configured"
      * ``redis_results`` -- human-readable detail (memory usage or error)
    """
    ret = {}
    if 'redis' in settings.CACHES:
        rc = cache.get_cache('redis')
        try:
            import redis
            # Connect directly with the redis client so we can pull INFO
            # statistics that the django cache wrapper does not expose.
            # NOTE(review): ``rc._server`` is a private attribute of the
            # cache backend -- verify it survives backend upgrades.
            redis_api = redis.StrictRedis.from_url('redis://%s' % rc._server)
            info_dict = redis_api.info()
            redis_status = "Online"
            redis_results = "Used Memory: %s" % info_dict['used_memory_human']
        except Exception as ex:  # was py2-only "except Exception, ex"
            redis_status = "Offline"
            redis_results = "Redis connection error: %s" % ex
    else:
        redis_status = "Not Configured"
        redis_results = "Redis is not configured on this system!"
    ret['redis_status'] = redis_status
    ret['redis_results'] = redis_results
    return ret
def check_rabbitmq():
    """Report broker health via the RabbitMQ management HTTP API.

    Returns ``{'rabbitmq_status': ...}`` where the value is
    'RabbitMQ OK', 'Offline', 'RabbitMQ Error: ...', or
    'RabbitMQ Not configured' when BROKER_URL is not amqp.
    """
    ret = {}
    mq_status = "Unknown"
    if settings.BROKER_URL.startswith('amqp'):
        amqp_parts = settings.BROKER_URL.replace('amqp://', '').split('/')
        # The management API listens on 15672; AMQP itself is on 5672.
        mq_management_url = amqp_parts[0].replace('5672', '15672')
        vhost = amqp_parts[1]
        try:
            mq = Resource('http://%s' % mq_management_url, timeout=2)
            vhost_dict = json.loads(mq.get('api/vhosts', timeout=2).body_string())
            # Assume offline until our vhost shows up in the API response.
            mq_status = "Offline"
            for d in vhost_dict:
                if d['name'] == vhost:
                    mq_status = 'RabbitMQ OK'
        except Exception as ex:  # was py2-only "except Exception, ex"
            mq_status = "RabbitMQ Error: %s" % ex
    else:
        mq_status = "RabbitMQ Not configured"
    ret['rabbitmq_status'] = mq_status
    return ret
def check_celery_health():
    """Report celery worker status (via Flower) and the soil heartbeat.

    Returns a dict with:
      * ``worker_status`` -- safe HTML summarizing each worker's state and
        running/concurrency/completed task counts ('' if Flower not set up)
      * ``heartbeat``     -- result of ``soil.heartbeat.is_alive()``
    """
    ret = {}
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    worker_status = ""
    if celery_monitoring:
        cresource = Resource(celery_monitoring, timeout=3)
        all_workers = {}
        try:
            t = cresource.get("api/workers").body_string()
            all_workers = json.loads(t)
        except Exception:
            # Best effort: if Flower is unreachable we simply report no
            # workers rather than failing the whole status page.
            pass
        worker_ok = '<span class="label label-success">OK</span>'
        worker_bad = '<span class="label label-important">Down</span>'
        tasks_ok = 'label-success'
        tasks_full = 'label-warning'
        worker_info = []
        for hostname, w in all_workers.items():
            status_html = mark_safe(worker_ok if w['status'] else worker_bad)
            # Highlight workers whose task slots are saturated.
            tasks_class = tasks_full if w['running_tasks'] == w['concurrency'] else tasks_ok
            tasks_html = mark_safe('<span class="label %s">%d / %d</span> :: %d' % (tasks_class, w['running_tasks'], w['concurrency'], w['completed_tasks']))
            worker_info.append(' '.join([hostname, status_html, tasks_html]))
        worker_status = '<br>'.join(worker_info)
    ret['worker_status'] = mark_safe(worker_status)
    ret['heartbeat'] = heartbeat.is_alive()
    return ret
| bsd-3-clause |
adrienbrault/home-assistant | tests/components/vizio/test_config_flow.py | 6 | 33389 | """Tests for Vizio config flow."""
import pytest
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.components.media_player import DEVICE_CLASS_SPEAKER, DEVICE_CLASS_TV
from homeassistant.components.vizio.config_flow import _get_config_schema
from homeassistant.components.vizio.const import (
CONF_APPS,
CONF_APPS_TO_INCLUDE_OR_EXCLUDE,
CONF_INCLUDE,
CONF_VOLUME_STEP,
DEFAULT_NAME,
DEFAULT_VOLUME_STEP,
DOMAIN,
VIZIO_SCHEMA,
)
from homeassistant.config_entries import (
SOURCE_IGNORE,
SOURCE_IMPORT,
SOURCE_USER,
SOURCE_ZEROCONF,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_NAME,
CONF_PIN,
CONF_PORT,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ACCESS_TOKEN,
CURRENT_APP,
HOST,
HOST2,
MOCK_IMPORT_VALID_TV_CONFIG,
MOCK_INCLUDE_APPS,
MOCK_INCLUDE_NO_APPS,
MOCK_PIN_CONFIG,
MOCK_SPEAKER_CONFIG,
MOCK_TV_CONFIG_NO_TOKEN,
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG,
MOCK_TV_WITH_EXCLUDE_CONFIG,
MOCK_USER_VALID_TV_CONFIG,
MOCK_ZEROCONF_SERVICE_INFO,
NAME,
NAME2,
UNIQUE_ID,
VOLUME_STEP,
)
from tests.common import MockConfigEntry
async def test_user_flow_minimum_fields(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow with minimum fields."""
    # test form shows
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Submitting the speaker config should create the entry in one step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_SPEAKER_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
async def test_user_flow_all_fields(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow with all fields."""
    # test form shows
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert result["data"][CONF_ACCESS_TOKEN] == ACCESS_TOKEN
    # Apps config is optional and must not be stored when not provided.
    assert CONF_APPS not in result["data"]
async def test_speaker_options_flow(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for speaker."""
    # Create the entry first, then open its options flow.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_SPEAKER_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_VOLUME_STEP: VOLUME_STEP}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    # Speakers never get an apps option.
    assert CONF_APPS not in result["data"]
async def test_tv_options_flow_no_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV without providing apps option."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    # Explicitly select "no apps" alongside the volume step.
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_NO_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    assert CONF_APPS not in result["data"]
async def test_tv_options_flow_with_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV with providing apps option."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    # Selecting apps stores them under CONF_APPS as an include list.
    assert CONF_APPS in result["data"]
    assert result["data"][CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_tv_options_flow_start_with_volume(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test options config flow for TV with providing apps option after providing volume step in initial config."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry = result["result"]
    # Supplying data directly to options.async_init completes the flow
    # without showing the form.
    result = await hass.config_entries.options.async_init(
        entry.entry_id, data={CONF_VOLUME_STEP: VOLUME_STEP}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert entry.options
    assert entry.options == {CONF_VOLUME_STEP: VOLUME_STEP}
    assert CONF_APPS not in entry.options
    assert CONF_APPS_TO_INCLUDE_OR_EXCLUDE not in entry.options
    # Re-run the options flow interactively and add apps this time.
    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    options = {CONF_VOLUME_STEP: VOLUME_STEP}
    options.update(MOCK_INCLUDE_APPS)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=options
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
    assert CONF_APPS in result["data"]
    assert result["data"][CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_user_host_already_configured(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test host is already configured during user setup."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    # Same host, different name: must be rejected as a duplicate host.
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_NAME] = "newtestname"
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {CONF_HOST: "existing_config_entry_found"}
async def test_user_serial_number_already_exists(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test serial_number is already configured with different host and name during user setup."""
    # Set up new entry
    MockConfigEntry(
        domain=DOMAIN, data=MOCK_SPEAKER_CONFIG, unique_id=UNIQUE_ID
    ).add_to_hass(hass)
    # Set up new entry with same unique_id but different host and name
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_HOST] = HOST2
    fail_entry[CONF_NAME] = NAME2
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {CONF_HOST: "existing_config_entry_found"}
async def test_user_error_on_could_not_connect(
    hass: HomeAssistantType, vizio_no_unique_id: pytest.fixture
) -> None:
    """Test with could_not_connect during user setup due to no connectivity."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Connectivity failure is reported against the host field.
    assert result["errors"] == {CONF_HOST: "cannot_connect"}
async def test_user_error_on_could_not_connect_invalid_token(
    hass: HomeAssistantType, vizio_cant_connect: pytest.fixture
) -> None:
    """Test with could_not_connect during user setup due to invalid token."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_VALID_TV_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # A bad token is a base error, not tied to a single field.
    assert result["errors"] == {"base": "cannot_connect"}
async def test_user_tv_pairing_no_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv during user entry and no apps configured."""
    # TV config without a token forces the PIN pairing step.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pairing_complete"
    # Acknowledging the completion form creates the entry.
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert CONF_APPS not in result["data"]
async def test_user_start_pairing_failure(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_start_pairing_failure: pytest.fixture,
) -> None:
    """Test failure to start pairing from user config flow."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Pairing couldn't start, so the flow stays on the user step with an error.
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_user_invalid_pin(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_invalid_pin_failure: pytest.fixture,
) -> None:
    """Test failure to complete pairing from user config flow."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # A rejected PIN re-shows the pairing form with a field error.
    assert result["step_id"] == "pair_tv"
    assert result["errors"] == {CONF_PIN: "complete_pairing_failed"}
async def test_user_ignore(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test user config flow doesn't throw an error when there's an existing ignored source."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=MOCK_SPEAKER_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_flow_minimum_fields(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow with minimum fields."""
    # The YAML schema fills in defaults (name, volume step) before import.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(
            {CONF_HOST: HOST, CONF_DEVICE_CLASS: DEVICE_CLASS_SPEAKER}
        ),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == DEFAULT_NAME
    assert result["data"][CONF_NAME] == DEFAULT_NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
    assert result["data"][CONF_VOLUME_STEP] == DEFAULT_VOLUME_STEP
async def test_import_flow_all_fields(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow with all fields."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_IMPORT_VALID_TV_CONFIG),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    assert result["data"][CONF_ACCESS_TOKEN] == ACCESS_TOKEN
    assert result["data"][CONF_VOLUME_STEP] == VOLUME_STEP
async def test_import_entity_already_configured(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test entity is already configured during import setup."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
        options={CONF_VOLUME_STEP: VOLUME_STEP},
    )
    entry.add_to_hass(hass)
    # Re-importing an identical config must abort, not duplicate.
    fail_entry = vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG.copy())
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=fail_entry
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured_device"
async def test_import_flow_update_options(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with updated options."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].options == {CONF_VOLUME_STEP: DEFAULT_VOLUME_STEP}
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry_id = result["result"].entry_id
    # Importing the same device with a changed option updates in place.
    updated_config = MOCK_SPEAKER_CONFIG.copy()
    updated_config[CONF_VOLUME_STEP] = VOLUME_STEP + 1
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    config_entry = hass.config_entries.async_get_entry(entry_id)
    assert config_entry.options[CONF_VOLUME_STEP] == VOLUME_STEP + 1
async def test_import_flow_update_name_and_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with updated name and apps."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_IMPORT_VALID_TV_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    entry_id = result["result"].entry_id
    updated_config = MOCK_IMPORT_VALID_TV_CONFIG.copy()
    updated_config[CONF_NAME] = NAME2
    updated_config[CONF_APPS] = {CONF_INCLUDE: [CURRENT_APP]}
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    config_entry = hass.config_entries.async_get_entry(entry_id)
    assert config_entry.data[CONF_NAME] == NAME2
    # Apps are mirrored into both data and options on update.
    assert config_entry.data[CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
    assert config_entry.options[CONF_APPS] == {CONF_INCLUDE: [CURRENT_APP]}
async def test_import_flow_update_remove_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with removed apps."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_TV_WITH_EXCLUDE_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    config_entry = hass.config_entries.async_get_entry(result["result"].entry_id)
    assert CONF_APPS in config_entry.data
    assert CONF_APPS in config_entry.options
    # Dropping apps from YAML must also drop them from the stored entry.
    updated_config = MOCK_TV_WITH_EXCLUDE_CONFIG.copy()
    updated_config.pop(CONF_APPS)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(updated_config),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    assert CONF_APPS not in config_entry.data
    assert CONF_APPS not in config_entry.options
async def test_import_needs_pairing(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv during import."""
    # Import of a token-less TV drops the user into the interactive flow.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_TV_CONFIG_NO_TOKEN
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Import flows get their own completion step id.
    assert result["step_id"] == "pairing_complete_import"
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
async def test_import_with_apps_needs_pairing(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_complete_pairing: pytest.fixture,
) -> None:
    """Test pairing config flow when access token not provided for tv but apps are included during import."""
    import_config = MOCK_TV_CONFIG_NO_TOKEN.copy()
    import_config[CONF_APPS] = {CONF_INCLUDE: [CURRENT_APP]}
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=import_config
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Mock inputting info without apps to make sure apps get stored
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input=_get_config_schema(MOCK_TV_CONFIG_NO_TOKEN)(MOCK_TV_CONFIG_NO_TOKEN),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pair_tv"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_PIN_CONFIG
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pairing_complete_import"
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_TV
    # The apps from the original import survive the pairing detour.
    assert result["data"][CONF_APPS][CONF_INCLUDE] == [CURRENT_APP]
async def test_import_flow_additional_configs(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_update: pytest.fixture,
) -> None:
    """Test import config flow with additional configs defined in CONF_APPS."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG),
    )
    await hass.async_block_till_done()
    assert result["result"].data[CONF_NAME] == NAME
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    config_entry = hass.config_entries.async_get_entry(result["result"].entry_id)
    # Additional app configs live in data only, not in options.
    assert CONF_APPS in config_entry.data
    assert CONF_APPS not in config_entry.options
async def test_import_error(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test that error is logged when import config has an error."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    # Same unique device, different (unreachable) host triggers the error path.
    fail_entry = MOCK_SPEAKER_CONFIG.copy()
    fail_entry[CONF_HOST] = "0.0.0.0"
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(fail_entry),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Ensure error gets logged
    vizio_log_list = [
        log
        for log in caplog.records
        if log.name == "homeassistant.components.vizio.config_flow"
    ]
    assert len(vizio_log_list) == 1
async def test_import_ignore(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
) -> None:
    """Test import config flow doesn't throw an error when there's an existing ignored source."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_zeroconf_flow(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf config flow."""
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # Form should always show even if all required properties are discovered
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # Apply discovery updates to entry to mimic when user hits submit without changing
    # defaults which were set from discovery parameters
    user_input = result["data_schema"](discovery_info)
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=user_input
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == NAME
    assert result["data"][CONF_HOST] == HOST
    assert result["data"][CONF_NAME] == NAME
    assert result["data"][CONF_DEVICE_CLASS] == DEVICE_CLASS_SPEAKER
async def test_zeroconf_flow_already_configured(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test entity is already configured during zeroconf setup."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    # Try rediscovering same device
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # Flow should abort because device is already setup
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_zeroconf_flow_with_port_in_host(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test entity is already configured during zeroconf setup when port is in host."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    # Try rediscovering same device, this time with port already in host
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    discovery_info[
        CONF_HOST
    ] = f"{discovery_info[CONF_HOST]}:{discovery_info[CONF_PORT]}"
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # Flow should abort because device is already setup
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_zeroconf_dupe_fail(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf config flow when device gets discovered multiple times."""
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # Form should always show even if all required properties are discovered
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # A second discovery while the first flow is still open must abort.
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    # Flow should abort because device is already setup
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_in_progress"
async def test_zeroconf_ignore(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf discovery doesn't throw an error when there's an existing ignored source."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
    )
    entry.add_to_hass(hass)
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_zeroconf_no_unique_id(
    hass: HomeAssistantType,
    vizio_guess_device_type: pytest.fixture,
    vizio_no_unique_id: pytest.fixture,
) -> None:
    """Test zeroconf discovery aborts when unique_id is None."""
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "cannot_connect"
async def test_zeroconf_abort_when_ignored(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_guess_device_type: pytest.fixture,
) -> None:
    """Test zeroconf discovery aborts when the same host has been ignored."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=MOCK_SPEAKER_CONFIG,
        options={CONF_VOLUME_STEP: VOLUME_STEP},
        source=SOURCE_IGNORE,
        unique_id=UNIQUE_ID,
    )
    entry.add_to_hass(hass)
    discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_zeroconf_flow_already_configured_hostname(
hass: HomeAssistantType,
vizio_connect: pytest.fixture,
vizio_bypass_setup: pytest.fixture,
vizio_hostname_check: pytest.fixture,
vizio_guess_device_type: pytest.fixture,
) -> None:
"""Test entity is already configured during zeroconf setup when existing entry uses hostname."""
config = MOCK_SPEAKER_CONFIG.copy()
config[CONF_HOST] = "hostname"
entry = MockConfigEntry(
domain=DOMAIN,
data=config,
options={CONF_VOLUME_STEP: VOLUME_STEP},
unique_id=UNIQUE_ID,
)
entry.add_to_hass(hass)
# Try rediscovering same device
discovery_info = MOCK_ZEROCONF_SERVICE_INFO.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
# Flow should abort because device is already setup
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_import_flow_already_configured_hostname(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_bypass_setup: pytest.fixture,
    vizio_hostname_check: pytest.fixture,
) -> None:
    """Test entity is already configured during import setup when existing entry uses hostname."""
    # Existing entry references the device by hostname.
    hostname_config = MOCK_SPEAKER_CONFIG.copy()
    hostname_config[CONF_HOST] = "hostname"
    entry = MockConfigEntry(
        domain=DOMAIN, data=hostname_config, options={CONF_VOLUME_STEP: VOLUME_STEP}
    )
    entry.add_to_hass(hass)

    # Importing the same speaker configuration should update that entry.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
    )

    # Flow aborts because the existing entry was updated in place.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "updated_entry"
    assert entry.data[CONF_HOST] == HOST
| mit |
aureooms/networkx | examples/algorithms/blockmodel.py | 12 | 3014 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
    """Creates hierarchical cluster of graph G from distance matrix.

    Assumes the nodes of G are consecutive integers starting at 0 (see the
    convert_node_labels_to_integers() call in __main__). Returns a list of
    lists: one list of node labels per cluster.
    """
    # dict() normalizes the return value: networkx 1.x returns a dict,
    # networkx 2.x returns a generator of (node, dict) pairs without .items().
    path_length = dict(nx.all_pairs_shortest_path_length(G))
    distances = numpy.zeros((len(G), len(G)))
    for u, p in path_length.items():
        for v, d in p.items():
            distances[u][v] = d
    # Create hierarchical cluster from the condensed distance matrix
    Y = distance.squareform(distances)
    Z = hierarchy.complete(Y)  # Creates HC using farthest point linkage
    # This partition selection is arbitrary, for illustrative purposes
    membership = list(hierarchy.fcluster(Z, t=1.15))
    # Create collection of lists for blockmodel, grouping nodes by cluster id
    partition = defaultdict(list)
    for n, p in enumerate(membership):
        partition[p].append(n)
    return list(partition.values())
if __name__ == '__main__':
    # Load the Hartford drug-users edge list (see module docstring).
    G=nx.read_edgelist("hartford_drug.edgelist")
    # Extract largest connected component into graph H
    # NOTE(review): next() takes the *first* yielded component; this is only
    # the largest if connected_component_subgraphs yields largest-first,
    # which newer networkx does not guarantee -- confirm.
    H = next(nx.connected_component_subgraphs(G))
    # Makes life easier to have consecutively labeled integer nodes
    H=nx.convert_node_labels_to_integers(H)
    # Create parititions with hierarchical clustering
    partitions=create_hc(H)
    # Build blockmodel graph
    BM=nx.blockmodel(H,partitions)
    # Draw original graph
    pos=nx.spring_layout(H,iterations=100)
    fig=plt.figure(1,figsize=(6,10))
    ax=fig.add_subplot(211)
    nx.draw(H,pos,with_labels=False,node_size=10)
    plt.xlim(0,1)
    plt.ylim(0,1)
    # Draw block model with weighted edges and nodes sized by number of internal nodes
    node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
    edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
    # Set positions to mean of positions of internal nodes from original graph
    posBM={}
    for n in BM:
        xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
        posBM[n]=xy.mean(axis=0)
    ax=fig.add_subplot(212)
    nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.axis('off')
    # Write both subplots (original graph + block model) to a PNG file.
    plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
torstehu/medlemssys | medlemssys/innhenting/management/commands/postnr_bolstad_import.py | 1 | 2788 | # -*- coding: utf-8 -*-
# vim: shiftwidth=4 tabstop=4 expandtab softtabstop=4 ai
# Copyright 2009-2014 Odin Hørthe Omdal
# This file is part of Medlemssys.
# Medlemssys is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Medlemssys is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Medlemssys. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from medlem.models import PostNummer
import csv
import os
# Module-level handle to the running Command instance; set in
# Command.handle() so the stderr() helper below can reach it.
obj = ""
class Command(BaseCommand):
    """Import Norwegian postal codes from an Erik Bolstad TSV export."""

    args = '<Tab seperated Erik Bolstad postnr-CSV files>'
    help = 'Importerer medlemane inn i databasen'

    def handle(self, *args, **options):
        """Read the TSV file named by args[0] and store each row as a PostNummer.

        Raises CommandError when the file is missing or the header row does
        not look like the expected Bolstad format.
        """
        global obj
        obj = self

        # options.get() instead of a membership test: Django populates every
        # declared option, so "'force_update' in options" would be true even
        # when the flag was not passed on the command line.
        if options.get('force_update'):
            self.force_update = True

        if not os.path.isfile(args[0]):
            raise CommandError("Fila finst ikkje ({0})".format(args[0]).encode('utf8'))

        # Column layout of the Bolstad export:
        # 0 1 2 3 4 5 6 7 8 9 10 11 12 13
        # POSTNR, POSTSTAD, POSTNR- OG STAD, BRUKSOMRÅDE, FOLKETAL, BYDEL, KOMMNR, KOMMUNE, FYLKE, LAT, LON, DATAKVALITET, DATAKVALITETSFORKLARING, SIST OPPDATERT
        csv.register_dialect('tabs', delimiter='\t')
        read = csv.reader(open(args[0]), dialect='tabs')

        # next(read) works on both Python 2 and 3 (reader.next() is 2-only).
        row = next(read)
        if row[0] != 'POSTNR' or row[11] != 'DATAKVALITET':
            raise CommandError("Ser ikkje ut som den korrekte type fila")

        for row in read:
            p = PostNummer()
            p.postnr = row[0].strip().replace(' ', '')
            p.poststad = row[1]
            p.bruksomrade = row[3]
            # Guard against blank or whitespace-only population fields,
            # which would make int() raise.
            if row[4].strip():
                p.folketal = int(row[4].strip())
            p.bydel = row[5]
            p.kommnr = row[6]
            p.kommune = row[7]
            p.fylke = row[8]
            p.lat = float(row[9])
            p.lon = float(row[10])
            p.datakvalitet = int(row[11])
            # startswith() avoids an IndexError when the date column is empty;
            # the "2" prefix accepts only year-2xxx ISO dates.
            if row[13].startswith("2"):
                p.sist_oppdatert = row[13]
            p.save()
def stderr(msg):
    """Write msg (UTF-8 encoded, newline-terminated) to the running command's stderr.

    Falls back to plain print when no Command instance has registered
    itself in the module-level ``obj``.

    NOTE(review): uses the Python 2-only ``unicode`` builtin; this helper
    raises NameError on Python 3 -- confirm intended runtime.
    """
    if obj:
        obj.stderr.write((unicode(msg) + "\n").encode('utf-8'))
    else:
        print((unicode(msg)).encode('utf-8'))
| agpl-3.0 |
SnappleCap/oh-mainline | vendor/packages/Django/django/contrib/gis/geos/base.py | 225 | 1635 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
    from django.contrib.gis import gdal
except ImportError:
    # A 'dummy' gdal module that reports GDAL as unavailable, so callers
    # can test gdal.HAS_GDAL without guarding the import themselves.
    class GDALInfo(object):
        HAS_GDAL = False
    gdal = GDALInfo()
# NumPy supported? After this block, `numpy` is either the real module
# or the (falsy) value False.
try:
    import numpy
except ImportError:
    numpy = False
class GEOSBase(object):
    """
    Base object for GEOS objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # The underlying GEOS pointer starts out as NULL.
    _ptr = None

    # Pointers assigned to `ptr` must be instances of this ctypes type
    # (or None).
    ptr_type = c_void_p

    def _get_ptr(self):
        # Guard against handing a NULL pointer to a GEOS C routine --
        # that would be very bad.
        if not self._ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # None (NULL) is always acceptable; anything else must be of the
        # compatible ctypes pointer type.
        if not (ptr is None or isinstance(ptr, self.ptr_type)):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr

    # Accessing the pointer through this property raises an exception when
    # it is NULL, preventing the C library from touching invalid memory.
    ptr = property(_get_ptr, _set_ptr)
| agpl-3.0 |
yukim/cassandra-dtest | cql_tests.py | 1 | 219237 | # coding: utf-8
import random
import time
from collections import OrderedDict
from uuid import uuid4, UUID
from dtest import Tester, canReuseCluster, freshCluster
from assertions import assert_invalid, assert_one, assert_none, assert_all
from tools import since, require, rows_to_list
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.protocol import ProtocolException, SyntaxException, ConfigurationException, InvalidRequestException
from cassandra.query import SimpleStatement
from cassandra.util import sortedset
cql_version = "3.0.0"
@canReuseCluster
class TestCQL(Tester):
    def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=1, rf=1):
        """Start (or reuse) a test cluster and return a CQL session.

        ordered: use the ByteOrderedPartitioner so rows return in key order.
        create_keyspace: create keyspace 'ks' (dropping any leftover one
        when the cluster is being reused).
        use_cache: enable the row cache.
        nodes / rf: cluster size and keyspace replication factor.
        """
        cluster = self.cluster

        # Partitioner and cache options must be applied before nodes start.
        if (ordered):
            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")

        if (use_cache):
            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})

        if not cluster.nodelist():
            cluster.populate(nodes).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1, version=cql_version)
        if create_keyspace:
            # Reused clusters may still hold state from the previous test.
            if self._preserve_cluster:
                session.execute("DROP KEYSPACE IF EXISTS ks")
            self.create_ks(session, 'ks', rf)
        return session
    def static_cf_test(self):
        """ Test static CF syntax: create, insert/update, select, and batch. """
        cursor = self.prepare()

        # Create
        cursor.execute("""
            CREATE TABLE users (
                userid uuid PRIMARY KEY,
                firstname text,
                lastname text,
                age int
            );
        """)

        # Inserts
        cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
        cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")

        # Queries
        res = cursor.execute("SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [['Frodo', 'Baggins']], res

        res = cursor.execute("SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']], res

        res = cursor.execute("SELECT * FROM users")
        assert rows_to_list(res) == [
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
            [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'],
        ], res

        # Test batch inserts: the batch updates one row, inserts into another,
        # and deletes the name columns from both.
        cursor.execute("""
            BEGIN BATCH
                INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
                UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
                DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
                DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
            APPLY BATCH
        """)

        res = cursor.execute("SELECT * FROM users")
        assert rows_to_list(res) == [
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None],
            [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None],
        ], res
    def noncomposite_static_cf_test(self):
        """ Test non-composite static CF syntax (same as static_cf_test but COMPACT STORAGE). """
        cursor = self.prepare()

        # Create
        cursor.execute("""
            CREATE TABLE users (
                userid uuid PRIMARY KEY,
                firstname text,
                lastname text,
                age int
            ) WITH COMPACT STORAGE;
        """)

        # Inserts
        cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
        cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")

        # Queries
        res = cursor.execute("SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [['Frodo', 'Baggins']], res

        res = cursor.execute("SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']], res

        res = cursor.execute("SELECT * FROM users")
        assert rows_to_list(res) == [
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
            [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'],
        ], res

        # Test batch inserts
        cursor.execute("""
            BEGIN BATCH
                INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
                UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
                DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
                DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
            APPLY BATCH
        """)

        res = cursor.execute("SELECT * FROM users")
        assert rows_to_list(res) == [
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None],
            [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None],
        ], res
    def dynamic_cf_test(self):
        """ Test non-composite dynamic CF syntax (compact table with one clustering column). """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE clicks (
                userid uuid,
                url text,
                time bigint,
                PRIMARY KEY (userid, url)
            ) WITH COMPACT STORAGE;
        """)

        # Inserts
        cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo.bar', 42)")
        cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo-2.bar', 24)")
        cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://bar.bar', 128)")
        cursor.execute("UPDATE clicks SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 and url = 'http://bar.foo'")
        cursor.execute("UPDATE clicks SET time = 12 WHERE userid IN (f47ac10b-58cc-4372-a567-0e02b2c3d479, 550e8400-e29b-41d4-a716-446655440000) and url = 'http://foo-3'")

        # Queries: rows come back sorted by the clustering column (url).
        res = cursor.execute("SELECT url, time FROM clicks WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [['http://bar.bar', 128], ['http://foo-2.bar', 24], ['http://foo-3', 12], ['http://foo.bar', 42]], res

        res = cursor.execute("SELECT * FROM clicks WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
        assert rows_to_list(res) == [
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://bar.foo', 24],
            [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://foo-3', 12]
        ], res

        res = cursor.execute("SELECT time FROM clicks")
        # Result from 'f47ac10b-58cc-4372-a567-0e02b2c3d479' are first
        assert rows_to_list(res) == [[24], [12], [128], [24], [12], [42]], res

        # Check we don't allow empty values for url since this is the full underlying cell name (#6152)
        assert_invalid(cursor, "INSERT INTO clicks (userid, url, time) VALUES (810e8500-e29b-41d4-a716-446655440000, '', 42)")
    def dense_cf_test(self):
        """ Test composite 'dense' CF syntax: slices, partial clustering keys, deletion. """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE connections (
                userid uuid,
                ip text,
                port int,
                time bigint,
                PRIMARY KEY (userid, ip, port)
            ) WITH COMPACT STORAGE;
        """)

        # Inserts
        cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.1', 80, 42)")
        cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 80, 24)")
        cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 90, 42)")
        cursor.execute("UPDATE connections SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.2' AND port = 80")

        # we don't have to include all of the clustering columns (see CASSANDRA-7990)
        cursor.execute("INSERT INTO connections (userid, ip, time) VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, '192.168.0.3', 42)")
        cursor.execute("UPDATE connections SET time = 42 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'")

        # Queries: equality and range restrictions on the first clustering column.
        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert rows_to_list(res) == [['192.168.0.1', 80, 42], ['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]], res

        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip >= '192.168.0.2'")
        assert rows_to_list(res) == [['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]], res

        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip = '192.168.0.2'")
        assert rows_to_list(res) == [['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]], res

        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip > '192.168.0.2'")
        assert rows_to_list(res) == [], res

        # Rows written without a port have a null port in the result.
        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
        self.assertEqual([['192.168.0.3', None, 42]], rows_to_list(res))

        res = cursor.execute("SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'")
        self.assertEqual([['192.168.0.4', None, 42]], rows_to_list(res))

        # Deletion: single cell, whole partition, then a single clustering prefix.
        cursor.execute("DELETE time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND ip = '192.168.0.2' AND port = 80")

        res = cursor.execute("SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert len(res) == 2, res

        cursor.execute("DELETE FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        res = cursor.execute("SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
        assert len(res) == 0, res

        cursor.execute("DELETE FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
        res = cursor.execute("SELECT * FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
        self.assertEqual([], res)
    def sparse_cf_test(self):
        """ Test composite 'sparse' CF syntax (two clustering columns, non-compact). """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE timeline (
                userid uuid,
                posted_month int,
                posted_day int,
                body text,
                posted_by text,
                PRIMARY KEY (userid, posted_month, posted_day)
            );
        """)

        # Inserts
        cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (550e8400-e29b-41d4-a716-446655440000, 1, 12, 'Something else', 'Frodo Baggins')")
        cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (550e8400-e29b-41d4-a716-446655440000, 1, 24, 'Something something', 'Frodo Baggins')")
        cursor.execute("UPDATE timeline SET body = 'Yo Froddo', posted_by = 'Samwise Gamgee' WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND posted_month = 1 AND posted_day = 3")
        # This row never sets posted_by, so it reads back as None below.
        cursor.execute("UPDATE timeline SET body = 'Yet one more message' WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND posted_month = 1 and posted_day = 30")

        # Queries
        res = cursor.execute("SELECT body, posted_by FROM timeline WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND posted_month = 1 AND posted_day = 24")
        assert rows_to_list(res) == [['Something something', 'Frodo Baggins']], res

        res = cursor.execute("SELECT posted_day, body, posted_by FROM timeline WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND posted_month = 1 AND posted_day > 12")
        assert rows_to_list(res) == [
            [24, 'Something something', 'Frodo Baggins'],
            [30, 'Yet one more message', None]
        ], res

        res = cursor.execute("SELECT posted_day, body, posted_by FROM timeline WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND posted_month = 1")
        assert rows_to_list(res) == [
            [12, 'Something else', 'Frodo Baggins'],
            [24, 'Something something', 'Frodo Baggins'],
            [30, 'Yet one more message', None]
        ], res
    def create_invalid_test(self):
        """ Check invalid CREATE TABLE requests are rejected. """
        cursor = self.prepare()

        # Empty column list is a syntax error.
        assert_invalid(cursor, "CREATE TABLE test ()", expected=SyntaxException)
        # Tables consisting of only a primary key were rejected before 1.2.
        if self.cluster.version() < "1.2":
            assert_invalid(cursor, "CREATE TABLE test (key text PRIMARY KEY)")

        # No primary key at all.
        assert_invalid(cursor, "CREATE TABLE test (c1 text, c2 text, c3 text)")
        # Two PRIMARY KEY declarations.
        assert_invalid(cursor, "CREATE TABLE test (key1 text PRIMARY KEY, key2 text PRIMARY KEY)")

        # Duplicate column names.
        assert_invalid(cursor, "CREATE TABLE test (key text PRIMARY KEY, key int)")
        assert_invalid(cursor, "CREATE TABLE test (key text PRIMARY KEY, c int, c text)")

        # COMPACT STORAGE with multiple regular columns and a clustering key.
        assert_invalid(cursor, "CREATE TABLE test (key text, key2 text, c int, d text, PRIMARY KEY (key, key2)) WITH COMPACT STORAGE")
    @freshCluster()
    def limit_ranges_test(self):
        """ Validate LIMIT option for 'range queries' in SELECT statements """
        # ordered=True (ByteOrderedPartitioner) makes token order match key
        # order, so the token() bounds below select predictable rows.
        cursor = self.prepare(ordered=True)

        cursor.execute("""
            CREATE TABLE clicks (
                userid int,
                url text,
                time bigint,
                PRIMARY KEY (userid, url)
            ) WITH COMPACT STORAGE;
        """)

        # Inserts
        for id in xrange(0, 100):
            for tld in ['com', 'org', 'net']:
                cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (%i, 'http://foo.%s', 42)" % (id, tld))

        # Queries
        res = cursor.execute("SELECT * FROM clicks WHERE token(userid) >= token(2) LIMIT 1")
        assert rows_to_list(res) == [[2, 'http://foo.com', 42]], res

        res = cursor.execute("SELECT * FROM clicks WHERE token(userid) > token(2) LIMIT 1")
        assert rows_to_list(res) == [[3, 'http://foo.com', 42]], res
    def limit_multiget_test(self):
        """ Validate LIMIT option for 'multiget' in SELECT statements """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE clicks (
                userid int,
                url text,
                time bigint,
                PRIMARY KEY (userid, url)
            ) WITH COMPACT STORAGE;
        """)

        # Inserts
        for id in xrange(0, 100):
            for tld in ['com', 'org', 'net']:
                cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (%i, 'http://foo.%s', 42)" % (id, tld))

        # Check that we do limit the output to 1 *and* that we respect query
        # order of keys (even though 48 is after 2)
        res = cursor.execute("SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1")

        # NOTE(review): lexicographic string comparison of version numbers;
        # pre-existing convention in this file.
        if self.cluster.version() >= '3.0':
            # 3.0+ returns rows in partition order, not IN-list order.
            assert rows_to_list(res) == [[2, 'http://foo.com', 42]], res
        else:
            assert rows_to_list(res) == [[48, 'http://foo.com', 42]], res
def tuple_query_mixed_order_columns_prepare(self, cursor, *col_order):
cursor.execute("""
create table foo (a int, b int, c int, d int , e int, PRIMARY KEY (a, b, c, d, e) )
WITH CLUSTERING ORDER BY (b {0}, c {1}, d {2}, e {3});
""".format(col_order[0], col_order[1], col_order[2], col_order[3]))
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 2, 0, 0, 0);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 1, 0, 0, 0);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 0, 0, 0);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 1, 2, -1);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 1, 1, -1);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 1, 1, 0);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 1, 0, 2);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 2, 1, -3);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, 0, 2, 0, 3);""")
cursor.execute("""INSERT INTO foo (a, b, c, d, e) VALUES (0, -1, 2, 2, 2);""")
    @require("7281")
    def tuple_query_mixed_order_columns_test(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b DESC, c ASC, d DESC, e ASC.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'ASC', 'DESC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 2, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 2, -1],
                                     [0, 0, 1, 1, 1], [0, 0, 2, 1, -3], [0, 0, 2, 0, 3]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test2(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b DESC, c DESC, d DESC, e ASC.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'DESC', 'DESC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 2, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 2, 1, -3],
                                     [0, 0, 2, 0, 3], [0, 0, 1, 2, -1], [0, 0, 1, 1, 1]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test3(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b ASC, c DESC, d DESC, e ASC.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'ASC', 'DESC', 'DESC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 0, 2, 1, -3], [0, 0, 2, 0, 3], [0, 0, 1, 2, -1],
                                     [0, 0, 1, 1, 1], [0, 1, 0, 0, 0], [0, 2, 0, 0, 0]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test4(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b DESC, c ASC, d ASC, e DESC.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'ASC', 'ASC', 'DESC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 2, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 1, 1],
                                     [0, 0, 1, 2, -1], [0, 0, 2, 0, 3], [0, 0, 2, 1, -3]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test5(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        Test that non mixed columns are still working.
        """
        cursor = self.prepare()
        # All clustering columns DESC (non-mixed control case).
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'DESC', 'DESC', 'DESC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 2, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 2, 1, -3],
                                     [0, 0, 2, 0, 3], [0, 0, 1, 2, -1], [0, 0, 1, 1, 1]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test6(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        Test that non mixed columns are still working.
        """
        cursor = self.prepare()
        # All clustering columns ASC (non-mixed control case).
        self.tuple_query_mixed_order_columns_prepare(cursor, 'ASC', 'ASC', 'ASC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) > (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 0, 1, 1, 1], [0, 0, 1, 2, -1], [0, 0, 2, 0, 3],
                                     [0, 0, 2, 1, -3], [0, 1, 0, 0, 0], [0, 2, 0, 0, 0]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test7(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Same order as test1 (b DESC, c ASC, d DESC, e ASC) but with <=.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'ASC', 'DESC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) <= (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 0, 0, 0, 0], [0, 0, 1, 1, -1], [0, 0, 1, 1, 0],
                                     [0, 0, 1, 0, 2], [0, -1, 2, 2, 2]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test8(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b ASC, c DESC, d DESC, e ASC; inclusive upper bound.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'ASC', 'DESC', 'DESC', 'ASC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) <= (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, -1, 2, 2, 2], [0, 0, 1, 1, -1], [0, 0, 1, 1, 0],
                                     [0, 0, 1, 0, 2], [0, 0, 0, 0, 0]], res
    @require("7281")
    def tuple_query_mixed_order_columns_test9(self):
        """CASSANDRA-7281: SELECT on tuple relations are broken for mixed ASC/DESC clustering order
        """
        cursor = self.prepare()
        # Clustering order: b DESC, c ASC, d DESC, e DESC; inclusive upper bound.
        self.tuple_query_mixed_order_columns_prepare(cursor, 'DESC', 'ASC', 'DESC', 'DESC')
        res = cursor.execute("SELECT * FROM foo WHERE a=0 AND (b, c, d, e) <= (0, 1, 1, 0);")
        assert rows_to_list(res) == [[0, 0, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 1, 1, -1],
                                     [0, 0, 1, 0, 2], [0, -1, 2, 2, 2]], res
    def simple_tuple_query_test(self):
        """Covers CASSANDRA-8613: tuple relation on clustering columns with
        ALLOW FILTERING (partition key not restricted)."""
        cursor = self.prepare()

        cursor.execute("create table bard (a int, b int, c int, d int , e int, PRIMARY KEY (a, b, c, d, e))")

        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 2, 0, 0, 0);""")
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 1, 0, 0, 0);""")
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 0, 0, 0);""")
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 2, 2, 2);""")
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 3, 3, 3);""")
        # Duplicate of an earlier row; upserts, so it does not add a row.
        cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")

        res = cursor.execute("SELECT * FROM bard WHERE b=0 AND (c, d, e) > (1, 1, 1) ALLOW FILTERING;")
        assert rows_to_list(res) == [[0, 0, 2, 2, 2], [0, 0, 3, 3, 3]]
    def limit_sparse_test(self):
        """ Validate LIMIT option for sparse table in SELECT statements """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE clicks (
                userid int,
                url text,
                day int,
                month text,
                year int,
                PRIMARY KEY (userid, url)
            );
        """)

        # Inserts: 100 partitions x 3 rows each.
        for id in xrange(0, 100):
            for tld in ['com', 'org', 'net']:
                cursor.execute("INSERT INTO clicks (userid, url, day, month, year) VALUES (%i, 'http://foo.%s', 1, 'jan', 2012)" % (id, tld))

        # Queries
        # Check we do get as many rows as requested
        res = cursor.execute("SELECT * FROM clicks LIMIT 4")
        assert len(res) == 4, res
def counters_test(self):
""" Validate counter support """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
total counter,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
cursor.execute("UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'")
res = cursor.execute("SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'")
assert rows_to_list(res) == [[1]], res
cursor.execute("UPDATE clicks SET total = total - 4 WHERE userid = 1 AND url = 'http://foo.com'")
res = cursor.execute("SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'")
assert rows_to_list(res) == [[-3]], res
cursor.execute("UPDATE clicks SET total = total+1 WHERE userid = 1 AND url = 'http://foo.com'")
res = cursor.execute("SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'")
assert rows_to_list(res) == [[-2]], res
cursor.execute("UPDATE clicks SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'")
res = cursor.execute("SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'")
assert rows_to_list(res) == [[-4]], res
    def indexed_with_eq_test(self):
        """ Check that you can query for an indexed column even with a key EQ clause """
        cursor = self.prepare()

        # Create
        cursor.execute("""
            CREATE TABLE users (
                userid uuid PRIMARY KEY,
                firstname text,
                lastname text,
                age int
            );
        """)

        # Secondary index makes 'age' queryable alongside the key restriction.
        cursor.execute("CREATE INDEX byAge ON users(age)")

        # Inserts
        cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
        cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")

        # Queries: first userid has age 32, so age = 33 matches nothing.
        res = cursor.execute("SELECT firstname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND age = 33")
        assert rows_to_list(res) == [], res

        res = cursor.execute("SELECT firstname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND age = 33")
        assert rows_to_list(res) == [['Samwise']], res
    def select_key_in_test(self):
        """ Query for KEY IN (...) """
        cursor = self.prepare()

        # Create
        cursor.execute("""
            CREATE TABLE users (
                userid uuid PRIMARY KEY,
                firstname text,
                lastname text,
                age int
            );
        """)

        # Inserts
        cursor.execute("""
                INSERT INTO users (userid, firstname, lastname, age)
                VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)
        """)
        cursor.execute("""
                INSERT INTO users (userid, firstname, lastname, age)
                VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, 'Samwise', 'Gamgee', 33)
        """)

        # Select: an IN over both partition keys returns both rows.
        res = cursor.execute("""
                SELECT firstname, lastname FROM users
                WHERE userid IN (550e8400-e29b-41d4-a716-446655440000, f47ac10b-58cc-4372-a567-0e02b2c3d479)
        """)

        assert len(res) == 2, res
    def exclusive_slice_test(self):
        """ Test SELECT respects inclusive and exclusive bounds """
        cursor = self.prepare()

        cursor.execute("""
            CREATE TABLE test (
                k int,
                c int,
                v int,
                PRIMARY KEY (k, c)
            ) WITH COMPACT STORAGE;
        """)

        # Inserts: rows c = v = 0..9 in one partition.
        for x in range(0, 10):
            cursor.execute("INSERT INTO test (k, c, v) VALUES (0, %i, %i)" % (x, x))

        # Queries: check row count plus the first/last value for each
        # combination of inclusive (>=, <=) and exclusive (>, <) bounds.
        res = cursor.execute("SELECT v FROM test WHERE k = 0")
        assert len(res) == 10, res

        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c >= 2 AND c <= 6")
        assert len(res) == 5 and res[0][0] == 2 and res[len(res) - 1][0] == 6, res

        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6")
        assert len(res) == 4 and res[0][0] == 3 and res[len(res) - 1][0] == 6, res

        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6")
        assert len(res) == 4 and res[0][0] == 2 and res[len(res) - 1][0] == 5, res

        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c > 2 AND c < 6")
        assert len(res) == 3 and res[0][0] == 3 and res[len(res) - 1][0] == 5, res

        # With LIMIT
        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6 LIMIT 2")
        assert len(res) == 2 and res[0][0] == 3 and res[len(res) - 1][0] == 4, res

        res = cursor.execute("SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2")
        assert len(res) == 2 and res[0][0] == 5 and res[len(res) - 1][0] == 4, res
    def in_clause_wide_rows_test(self):
        """ Check IN support for 'wide rows' in SELECT statement """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test1 (
                k int,
                c int,
                v int,
                PRIMARY KEY (k, c)
            ) WITH COMPACT STORAGE;
        """)
        # Inserts: single partition, c = v = 0..9.
        for x in range(0, 10):
            cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, %i, %i)" % (x, x))
        res = cursor.execute("SELECT v FROM test1 WHERE k = 0 AND c IN (5, 2, 8)")
        # Up to 1.2 the rows come back in the order the IN values were listed;
        # afterwards they are returned in clustering order.
        if self.cluster.version() <= "1.2":
            assert rows_to_list(res) == [[5], [2], [8]], res
        else:
            assert rows_to_list(res) == [[2], [5], [8]], res
        # composites
        cursor.execute("""
            CREATE TABLE test2 (
                k int,
                c1 int,
                c2 int,
                v int,
                PRIMARY KEY (k, c1, c2)
            ) WITH COMPACT STORAGE;
        """)
        # Inserts: c1 is always 0, so an IN (5, 2, 8) on c1 matches nothing.
        for x in range(0, 10):
            cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, 0, %i, %i)" % (x, x))
        # Check first we don't allow IN everywhere: pre-3.0 rejects IN on a
        # non-last clustering column; 3.0+ accepts it (and finds no rows here).
        if self.cluster.version() >= '3.0':
            assert_none(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
        else:
            assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
        res = cursor.execute("SELECT v FROM test2 WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)")
        assert rows_to_list(res) == [[2], [5], [8]], res
    def order_by_test(self):
        """ Check ORDER BY support in SELECT statement """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test1 (
                k int,
                c int,
                v int,
                PRIMARY KEY (k, c)
            ) WITH COMPACT STORAGE;
        """)
        # Inserts: c = v = 0..9 in one partition.
        for x in range(0, 10):
            cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, %i, %i)" % (x, x))
        # ORDER BY the clustering column DESC reverses the natural order.
        res = cursor.execute("SELECT v FROM test1 WHERE k = 0 ORDER BY c DESC")
        assert rows_to_list(res) == [[x] for x in range(9, -1, -1)], res
        # composites
        cursor.execute("""
            CREATE TABLE test2 (
                k int,
                c1 int,
                c2 int,
                v int,
                PRIMARY KEY (k, c1, c2)
            );
        """)
        # Inserts: v enumerates the (c1, c2) pairs so ordering is observable.
        for x in range(0, 4):
            for y in range(0, 2):
                cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, %i, %i, %i)" % (x, y, x * 2 + y))
        # Check first we don't always ORDER BY: rejected for an unknown
        # column, a non-first clustering column, and the partition key.
        assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c DESC")
        assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c2 DESC")
        assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY k DESC")
        res = cursor.execute("SELECT v FROM test2 WHERE k = 0 ORDER BY c1 DESC")
        assert rows_to_list(res) == [[x] for x in range(7, -1, -1)], res
        res = cursor.execute("SELECT v FROM test2 WHERE k = 0 ORDER BY c1")
        assert rows_to_list(res) == [[x] for x in range(0, 8)], res
    def more_order_by_test(self):
        """ More ORDER BY checks (#4160) """
        cursor = self.prepare()
        cursor.execute("""
            CREATE COLUMNFAMILY Test (
                row text,
                number int,
                string text,
                PRIMARY KEY (row, number)
            ) WITH COMPACT STORAGE
        """)
        cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 1, 'one');")
        cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 2, 'two');")
        cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 3, 'three');")
        cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 4, 'four');")
        # Every combination of slice bound (</>=/>/<=) with an explicit
        # ASC or DESC ordering must return the expected window, ordered.
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number ASC;")
        assert rows_to_list(res) == [[1], [2]], res
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number ASC;")
        assert rows_to_list(res) == [[3], [4]], res
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number DESC;")
        assert rows_to_list(res) == [[2], [1]], res
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number DESC;")
        assert rows_to_list(res) == [[4], [3]], res
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number > 3 ORDER BY number DESC;")
        assert rows_to_list(res) == [[4]], res
        res = cursor.execute("SELECT number FROM Test WHERE row='row' AND number <= 3 ORDER BY number DESC;")
        assert rows_to_list(res) == [[3], [2], [1]], res
def order_by_validation_test(self):
""" Check we don't allow order by on row key (#4246) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY (k1, k2)
)
""")
q = "INSERT INTO test (k1, k2, v) VALUES (%d, %d, %d)"
cursor.execute(q % (0, 0, 0))
cursor.execute(q % (1, 1, 1))
cursor.execute(q % (2, 2, 2))
assert_invalid(cursor, "SELECT * FROM test ORDER BY k2")
    def order_by_with_in_test(self):
        """ Check that order-by works with IN (#4327) """
        cursor = self.prepare()
        # NOTE(review): a fetch size of None presumably disables driver-side
        # paging so the whole ordered result arrives at once — confirm
        # against the driver version in use.
        cursor.default_fetch_size = None
        cursor.execute("""
            CREATE TABLE test(
                my_id varchar,
                col1 int,
                value varchar,
                PRIMARY KEY (my_id, col1)
            )
        """)
        cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key1', 1, 'a')")
        cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key2', 3, 'c')")
        cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key3', 2, 'b')")
        cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key4', 4, 'd')")
        # Rows from the three selected partitions must come back merged and
        # sorted by col1, regardless of the column order in the SELECT list.
        query = SimpleStatement("SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1")
        res = cursor.execute(query)
        assert rows_to_list(res) == [[1], [2], [3]], res
        query = SimpleStatement("SELECT col1, my_id FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1")
        res = cursor.execute(query)
        assert rows_to_list(res) == [[1, 'key1'], [2, 'key3'], [3, 'key2']], res
        query = SimpleStatement("SELECT my_id, col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1")
        res = cursor.execute(query)
        assert rows_to_list(res) == [['key1', 1], ['key3', 2], ['key2', 3]], res
    def reversed_comparator_test(self):
        """ Check ORDER BY against tables declared with CLUSTERING ORDER. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k int,
                c int,
                v int,
                PRIMARY KEY (k, c)
            ) WITH CLUSTERING ORDER BY (c DESC);
        """)
        # Inserts
        for x in range(0, 10):
            cursor.execute("INSERT INTO test (k, c, v) VALUES (0, %i, %i)" % (x, x))
        # With a reversed comparator both ASC and DESC queries must still work.
        res = cursor.execute("SELECT c, v FROM test WHERE k = 0 ORDER BY c ASC")
        assert rows_to_list(res) == [[x, x] for x in range(0, 10)], res
        res = cursor.execute("SELECT c, v FROM test WHERE k = 0 ORDER BY c DESC")
        assert rows_to_list(res) == [[x, x] for x in range(9, -1, -1)], res
        cursor.execute("""
            CREATE TABLE test2 (
                k int,
                c1 int,
                c2 int,
                v text,
                PRIMARY KEY (k, c1, c2)
            ) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
        """)
        # Inserts
        for x in range(0, 10):
            for y in range(0, 10):
                cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, %i, %i, '%i%i')" % (x, y, x, y))
        # Only orderings matching the declared clustering order (or its exact
        # reverse) are allowed: ASC/ASC and DESC/DESC must be rejected.
        assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 ASC")
        assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 DESC")
        res = cursor.execute("SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC")
        assert rows_to_list(res) == [[x, y, '%i%i' % (x, y)] for x in range(0, 10) for y in range(9, -1, -1)], res
        res = cursor.execute("SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 DESC")
        assert rows_to_list(res) == [[x, y, '%i%i' % (x, y)] for x in range(0, 10) for y in range(9, -1, -1)], res
        res = cursor.execute("SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 ASC")
        assert rows_to_list(res) == [[x, y, '%i%i' % (x, y)] for x in range(9, -1, -1) for y in range(0, 10)], res
        # ORDER BY columns must be listed in clustering-column order.
        assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c2 DESC, c1 ASC")
    def invalid_old_property_test(self):
        """ Check obsolete properties from CQL2 are rejected """
        cursor = self.prepare()
        # 'default_validation' is a CQL2-era table option; both CREATE and
        # ALTER must fail at parse time (SyntaxException, not a server error).
        assert_invalid(cursor, "CREATE TABLE test (foo text PRIMARY KEY, c int) WITH default_validation=timestamp", expected=SyntaxException)
        cursor.execute("CREATE TABLE test (foo text PRIMARY KEY, c int)")
        assert_invalid(cursor, "ALTER TABLE test WITH default_validation=int;", expected=SyntaxException)
    def null_support_test(self):
        """ Test support for nulls """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k int,
                c int,
                v1 int,
                v2 set<text>,
                PRIMARY KEY (k, c)
            );
        """)
        # Inserts: an explicit null on insert reads back as None, as does a
        # column that was simply never written.
        cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (0, 0, null, {'1', '2'})")
        cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, 1)")
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [[0, 0, None, set(['1', '2'])], [0, 1, 1, None]], res
        # Writing null over an existing value deletes it.
        cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, null)")
        cursor.execute("INSERT INTO test (k, c, v2) VALUES (0, 0, null)")
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [[0, 0, None, None], [0, 1, None, None]], res
        # null is rejected inside collection literals and as a key value.
        assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 2, {1, null})")
        assert_invalid(cursor, "SELECT * FROM test WHERE k = null")
        assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 0, { 'foo', 'bar', null })")
def nameless_index_test(self):
""" Test CREATE INDEX without name and validate the index can be dropped """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE users (
id text PRIMARY KEY,
birth_year int,
)
""")
cursor.execute("CREATE INDEX on users(birth_year)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Tom', 42)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Paul', 24)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Bob', 42)")
res = cursor.execute("SELECT id FROM users WHERE birth_year = 42")
assert rows_to_list(res) == [['Tom'], ['Bob']]
cursor.execute("DROP INDEX users_birth_year_idx")
assert_invalid(cursor, "SELECT id FROM users WHERE birth_year = 42")
    def deletion_test(self):
        """ Test simple deletion and in particular check for #4193 bug """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE testcf (
                username varchar,
                id int,
                name varchar,
                stuff varchar,
                PRIMARY KEY(username, id)
            );
        """)
        q = "INSERT INTO testcf (username, id, name, stuff) VALUES ('%s', %d, '%s', '%s');"
        row1 = ('abc', 2, 'rst', 'some value')
        row2 = ('abc', 4, 'xyz', 'some other value')
        cursor.execute(q % row1)
        cursor.execute(q % row2)
        res = cursor.execute("SELECT * FROM testcf")
        assert rows_to_list(res) == [list(row1), list(row2)], res
        # Deleting one row must leave the other row of the partition intact.
        cursor.execute("DELETE FROM testcf WHERE username='abc' AND id=2")
        res = cursor.execute("SELECT * FROM testcf")
        assert rows_to_list(res) == [list(row2)], res
        # Compact case
        cursor.execute("""
            CREATE TABLE testcf2 (
                username varchar,
                id int,
                name varchar,
                stuff varchar,
                PRIMARY KEY(username, id, name)
            ) WITH COMPACT STORAGE;
        """)
        q = "INSERT INTO testcf2 (username, id, name, stuff) VALUES ('%s', %d, '%s', '%s');"
        row1 = ('abc', 2, 'rst', 'some value')
        row2 = ('abc', 4, 'xyz', 'some other value')
        cursor.execute(q % row1)
        cursor.execute(q % row2)
        res = cursor.execute("SELECT * FROM testcf2")
        assert rows_to_list(res) == [list(row1), list(row2)], res
        # Won't be allowed until #3708 is in: pre-1.2 rejects a DELETE that
        # names only a prefix of the clustering columns.
        if self.cluster.version() < "1.2":
            assert_invalid(cursor, "DELETE FROM testcf2 WHERE username='abc' AND id=2")
    def count_test(self):
        """ Check COUNT(*) and COUNT(1) over a single partition and an IN query. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE events (
                kind text,
                time int,
                value1 int,
                value2 int,
                PRIMARY KEY(kind, time)
            )
        """)
        full = "INSERT INTO events (kind, time, value1, value2) VALUES ('ev1', %d, %d, %d)"
        no_v2 = "INSERT INTO events (kind, time, value1) VALUES ('ev1', %d, %d)"
        # Five 'ev1' rows (some without value2 — COUNT must still see them)
        # plus one 'ev2' row.
        cursor.execute(full % (0, 0, 0))
        cursor.execute(full % (1, 1, 1))
        cursor.execute(no_v2 % (2, 2))
        cursor.execute(full % (3, 3, 3))
        cursor.execute(no_v2 % (4, 4))
        cursor.execute("INSERT INTO events (kind, time, value1, value2) VALUES ('ev2', 0, 0, 0)")
        res = cursor.execute("SELECT COUNT(*) FROM events WHERE kind = 'ev1'")
        assert rows_to_list(res) == [[5]], res
        res = cursor.execute("SELECT COUNT(1) FROM events WHERE kind IN ('ev1', 'ev2') AND time=0")
        assert rows_to_list(res) == [[2]], res
    def reserved_keyword_test(self):
        """ 'count' is usable as a column name, but the reserved word 'select' is not. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test1 (
                key text PRIMARY KEY,
                count counter,
            )
        """)
        # 'select' as an identifier must fail at parse time.
        assert_invalid(cursor, "CREATE TABLE test2 ( select text PRIMARY KEY, x int)", expected=SyntaxException)
    def identifier_test(self):
        """ Unquoted identifiers are case-insensitive; duplicates and reserved words are rejected. """
        cursor = self.prepare()
        # Test case insensitivity
        cursor.execute("CREATE TABLE test1 (key_23 int PRIMARY KEY, CoLuMn int)")
        # Should work
        cursor.execute("INSERT INTO test1 (Key_23, Column) VALUES (0, 0)")
        cursor.execute("INSERT INTO test1 (KEY_23, COLUMN) VALUES (0, 0)")
        # invalid due to repeated identifiers (including case-folded duplicates)
        assert_invalid(cursor, "INSERT INTO test1 (key_23, column, column) VALUES (0, 0, 0)")
        assert_invalid(cursor, "INSERT INTO test1 (key_23, column, COLUMN) VALUES (0, 0, 0)")
        assert_invalid(cursor, "INSERT INTO test1 (key_23, key_23, column) VALUES (0, 0, 0)")
        assert_invalid(cursor, "INSERT INTO test1 (key_23, KEY_23, column) VALUES (0, 0, 0)")
        # Reserved keywords
        assert_invalid(cursor, "CREATE TABLE test1 (select int PRIMARY KEY, column int)", expected=SyntaxException)
#def keyspace_test(self):
# cursor = self.prepare()
# assert_invalid(cursor, "CREATE KEYSPACE test1")
# cursor.execute("CREATE KEYSPACE test2 WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
# assert_invalid(cursor, "CREATE KEYSPACE My_much_much_too_long_identifier_that_should_not_work WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
# cursor.execute("DROP KEYSPACE test2")
# assert_invalid(cursor, "DROP KEYSPACE non_existing")
# cursor.execute("CREATE KEYSPACE test2 WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
#def table_test(self):
# cursor = self.prepare()
# cursor.execute("""
# CREATE TABLE test1 (
# k int PRIMARY KEY,
# c int
# )
# """)
# cursor.execute("""
# CREATE TABLE test2 (
# k int,
# name int,
# value int,
# PRIMARY KEY(k, name)
# ) WITH COMPACT STORAGE
# """)
# cursor.execute("""
# CREATE TABLE test3 (
# k int,
# c int,
# PRIMARY KEY (k),
# )
# """)
# # existing table
# assert_invalid(cursor, "CREATE TABLE test3 (k int PRIMARY KEY, c int)")
# # repeated column
# assert_invalid(cursor, "CREATE TABLE test4 (k int PRIMARY KEY, c int, k text)")
# # compact storage limitations
# assert_invalid(cursor, "CREATE TABLE test4 (k int, name, int, c1 int, c2 int, PRIMARY KEY(k, name)) WITH COMPACT STORAGE")
# cursor.execute("DROP TABLE test1")
# cursor.execute("TRUNCATE test2")
# cursor.execute("""
# CREATE TABLE test1 (
# k int PRIMARY KEY,
# c1 int,
# c2 int,
# )
# """)
    def batch_test(self):
        """ A BATCH mixing INSERT, UPDATE and DELETE must execute at QUORUM. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE users (
                userid text PRIMARY KEY,
                name text,
                password text
            )
        """)
        # No result check here — the test only asserts the batch is accepted.
        query = SimpleStatement("""
            BEGIN BATCH
                INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
                UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
                INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
                DELETE name FROM users WHERE userid = 'user1';
            APPLY BATCH;
        """, consistency_level=ConsistencyLevel.QUORUM)
        cursor.execute(query)
    def token_range_test(self):
        """ Check SELECT with token() bounds returns the expected partition slices. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k int PRIMARY KEY,
                c int,
                v int
            )
        """)
        c = 100
        for i in range(0, c):
            cursor.execute("INSERT INTO test (k, c, v) VALUES (%d, %d, %d)" % (i, i, i))
        # inOrder captures the partitions in the order the server returns
        # them, i.e. token order; the range queries below are checked
        # against slices of that order.
        rows = cursor.execute("SELECT k FROM test")
        inOrder = [x[0] for x in rows]
        assert len(inOrder) == c, 'Expecting %d elements, got %d' % (c, len(inOrder))
        if self.cluster.version() < '1.2':
            cursor.execute("SELECT k FROM test WHERE token(k) >= 0")
        else:
            # Smallest possible long token, so the range covers every row.
            min_token = -2 ** 63
            res = cursor.execute("SELECT k FROM test WHERE token(k) >= %d" % min_token)
            assert len(res) == c, "%s [all: %s]" % (str(res), str(inOrder))
        #assert_invalid(cursor, "SELECT k FROM test WHERE token(k) >= 0")
        #cursor.execute("SELECT k FROM test WHERE token(k) >= 0")
        res = cursor.execute("SELECT k FROM test WHERE token(k) >= token(%d) AND token(k) < token(%d)" % (inOrder[32], inOrder[65]))
        assert rows_to_list(res) == [[inOrder[x]] for x in range(32, 65)], "%s [all: %s]" % (str(res), str(inOrder))
    def table_options_test(self):
        """ Exercise CREATE/ALTER TABLE WITH <options>; only checks the statements are accepted. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k int PRIMARY KEY,
                c int
            ) WITH comment = 'My comment'
               AND read_repair_chance = 0.5
               AND dclocal_read_repair_chance = 0.5
               AND gc_grace_seconds = 4
               AND bloom_filter_fp_chance = 0.01
               AND compaction = { 'class' : 'LeveledCompactionStrategy',
                                  'sstable_size_in_mb' : 10 }
               AND compression = { 'sstable_compression' : '' }
               AND caching = 'all'
        """)
        # ALTER must accept changing every one of those options, including
        # switching the compaction strategy class.
        cursor.execute("""
            ALTER TABLE test
            WITH comment = 'other comment'
             AND read_repair_chance = 0.3
             AND dclocal_read_repair_chance = 0.3
             AND gc_grace_seconds = 100
             AND bloom_filter_fp_chance = 0.1
             AND compaction = { 'class' : 'SizeTieredCompactionStrategy',
                                'min_sstable_size' : 42 }
             AND compression = { 'sstable_compression' : 'SnappyCompressor' }
             AND caching = 'rows_only'
        """)
def timestamp_and_ttl_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
c text,
d text
)
""")
cursor.execute("INSERT INTO test (k, c) VALUES (1, 'test')")
cursor.execute("INSERT INTO test (k, c) VALUES (2, 'test') USING TTL 400")
res = cursor.execute("SELECT k, c, writetime(c), ttl(c) FROM test")
assert len(res) == 2, res
for r in res:
assert isinstance(r[2], (int, long))
if r[0] == 1:
assert r[3] == None, res
else:
assert isinstance(r[3], (int, long)), res
# wrap writetime(), ttl() in other functions (test for CASSANDRA-8451)
res = cursor.execute("SELECT k, c, blobAsBigint(bigintAsBlob(writetime(c))), ttl(c) FROM test")
assert len(res) == 2, res
for r in res:
assert isinstance(r[2], (int, long))
if r[0] == 1:
assert r[3] == None, res
else:
assert isinstance(r[3], (int, long)), res
res = cursor.execute("SELECT k, c, writetime(c), blobAsInt(intAsBlob(ttl(c))) FROM test")
assert len(res) == 2, res
for r in res:
assert isinstance(r[2], (int, long))
if r[0] == 1:
assert r[3] == None, res
else:
assert isinstance(r[3], (int, long)), res
assert_invalid(cursor, "SELECT k, c, writetime(k) FROM test")
res = cursor.execute("SELECT k, d, writetime(d) FROM test WHERE k = 1")
assert rows_to_list(res) == [[1, None, None]]
def no_range_ghost_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
)
""")
for k in range(0, 5):
cursor.execute("INSERT INTO test (k, v) VALUES (%d, 0)" % k)
unsorted_res = cursor.execute("SELECT k FROM test")
res = sorted(unsorted_res)
assert rows_to_list(res) == [[k] for k in range(0, 5)], res
cursor.execute("DELETE FROM test WHERE k=2")
unsorted_res = cursor.execute("SELECT k FROM test")
res = sorted(unsorted_res)
assert rows_to_list(res) == [[k] for k in range(0, 5) if k is not 2], res
# Example from #3505
cursor.execute("CREATE KEYSPACE ks1 with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };")
cursor.execute("USE ks1")
cursor.execute("""
CREATE COLUMNFAMILY users (
KEY varchar PRIMARY KEY,
password varchar,
gender varchar,
birth_year bigint)
""")
cursor.execute("INSERT INTO users (KEY, password) VALUES ('user1', 'ch@ngem3a')")
cursor.execute("UPDATE users SET gender = 'm', birth_year = 1980 WHERE KEY = 'user1'")
res = cursor.execute("SELECT * FROM users WHERE KEY='user1'")
assert rows_to_list(res) == [['user1', 1980, 'm', 'ch@ngem3a']], res
cursor.execute("TRUNCATE users")
res = cursor.execute("SELECT * FROM users")
assert rows_to_list(res) == [], res
res = cursor.execute("SELECT * FROM users WHERE KEY='user1'")
assert rows_to_list(res) == [], res
    @freshCluster()
    def undefined_column_handling_test(self):
        """ Columns that were never written must read back as null. """
        # ordered=True: presumably an order-preserving partitioner so the
        # un-sorted SELECT below returns rows in insert order — confirm.
        cursor = self.prepare(ordered=True)
        cursor.execute("""
            CREATE TABLE test (
                k int PRIMARY KEY,
                v1 int,
                v2 int,
            )
        """)
        cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 0, 0)")
        cursor.execute("INSERT INTO test (k, v1) VALUES (1, 1)")
        cursor.execute("INSERT INTO test (k, v1, v2) VALUES (2, 2, 2)")
        res = cursor.execute("SELECT v2 FROM test")
        assert rows_to_list(res) == [[0], [None], [2]], res
        res = cursor.execute("SELECT v2 FROM test WHERE k = 1")
        assert rows_to_list(res) == [[None]], res
    @freshCluster()
    def range_tombstones_test(self):
        """ Test deletion by 'composite prefix' (range tombstones) """
        cluster = self.cluster
        # Uses 3 nodes just to make sure RowMutation are correctly serialized
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)
        cursor = self.patient_cql_connection(node1, version=cql_version)
        self.create_ks(cursor, 'ks', 1)
        cursor.execute("""
            CREATE TABLE test1 (
                k int,
                c1 int,
                c2 int,
                v1 int,
                v2 int,
                PRIMARY KEY (k, c1, c2)
            );
        """)
        time.sleep(1)
        # Each partition i holds cpr rows with v1 = v2 running consecutively.
        rows = 5
        col1 = 2
        col2 = 2
        cpr = col1 * col2
        for i in xrange(0, rows):
            for j in xrange(0, col1):
                for k in xrange(0, col2):
                    n = (i * cpr) + (j * col2) + k
                    cursor.execute("INSERT INTO test1 (k, c1, c2, v1, v2) VALUES (%d, %d, %d, %d, %d)" % (i, j, k, n, n))
        for i in xrange(0, rows):
            res = cursor.execute("SELECT v1, v2 FROM test1 where k = %d" % i)
            assert rows_to_list(res) == [[x, x] for x in xrange(i * cpr, (i + 1) * cpr)], res
        # Delete the whole c1 = 0 prefix of every partition (range tombstone).
        for i in xrange(0, rows):
            cursor.execute("DELETE FROM test1 WHERE k = %d AND c1 = 0" % i)
        for i in xrange(0, rows):
            res = cursor.execute("SELECT v1, v2 FROM test1 WHERE k = %d" % i)
            assert rows_to_list(res) == [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)], res
        # The same result must hold after the memtables are flushed to disk.
        cluster.flush()
        time.sleep(0.2)
        for i in xrange(0, rows):
            res = cursor.execute("SELECT v1, v2 FROM test1 WHERE k = %d" % i)
            assert rows_to_list(res) == [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)], res
    def range_tombstones_compaction_test(self):
        """ Test deletion by 'composite prefix' (range tombstones) with compaction """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test1 (
                k int,
                c1 int,
                c2 int,
                v1 text,
                PRIMARY KEY (k, c1, c2)
            );
        """)
        for c1 in range(0, 4):
            for c2 in range(0, 2):
                cursor.execute("INSERT INTO test1 (k, c1, c2, v1) VALUES (0, %d, %d, '%s')" % (c1, c2, '%i%i' % (c1, c2)))
        # Flush the data, delete the c1 = 1 prefix, then flush and compact so
        # the range tombstone and the data end up merged in one sstable.
        self.cluster.flush()
        cursor.execute("DELETE FROM test1 WHERE k = 0 AND c1 = 1")
        self.cluster.flush()
        self.cluster.compact()
        # Everything except the deleted c1 = 1 rows must survive compaction.
        res = cursor.execute("SELECT v1 FROM test1 WHERE k = 0")
        assert rows_to_list(res) == [['%i%i' % (c1, c2)] for c1 in xrange(0, 4) for c2 in xrange(0, 2) if c1 != 1], res
def delete_row_test(self):
""" Test deletion of rows """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c1 int,
c2 int,
v1 int,
v2 int,
PRIMARY KEY (k, c1, c2)
);
""")
q = "INSERT INTO test (k, c1, c2, v1, v2) VALUES (%d, %d, %d, %d, %d)"
cursor.execute(q % (0, 0, 0, 0, 0))
cursor.execute(q % (0, 0, 1, 1, 1))
cursor.execute(q % (0, 0, 2, 2, 2))
cursor.execute(q % (0, 1, 0, 3, 3))
cursor.execute("DELETE FROM test WHERE k = 0 AND c1 = 0 AND c2 = 0")
res = cursor.execute("SELECT * FROM test")
assert len(res) == 3, res
    def range_query_2ndary_test(self):
        """ Test range queries with 2ndary indexes (#4257) """
        cursor = self.prepare()
        cursor.execute("CREATE TABLE indextest (id int primary key, row int, setid int);")
        cursor.execute("CREATE INDEX indextest_setid_idx ON indextest (setid)")
        q = "INSERT INTO indextest (id, row, setid) VALUES (%d, %d, %d);"
        cursor.execute(q % (0, 0, 0))
        cursor.execute(q % (1, 1, 0))
        cursor.execute(q % (2, 2, 0))
        cursor.execute(q % (3, 3, 0))
        # A range restriction on the non-indexed 'row' column is only
        # accepted with an explicit ALLOW FILTERING.
        assert_invalid(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1;")
        res = cursor.execute("SELECT * FROM indextest WHERE setid = 0 AND row < 1 ALLOW FILTERING;")
        assert rows_to_list(res) == [[0, 0, 0]], res
    def compression_option_validation_test(self):
        """ Check for unknown compression parameters options (#4266) """
        cursor = self.prepare()
        # The old CQL2-style 'compression_parameters:' syntax is a parse error.
        assert_invalid(cursor, """
          CREATE TABLE users (key varchar PRIMARY KEY, password varchar, gender varchar)
          WITH compression_parameters:sstable_compressor = 'DeflateCompressor';
        """, expected=SyntaxException)
        # In the 1.2+ map syntax an unknown option key ('sstable_compressor',
        # not 'sstable_compression') is a configuration error, not a parse error.
        if self.cluster.version() >= '1.2':
            assert_invalid(cursor, """
              CREATE TABLE users (key varchar PRIMARY KEY, password varchar, gender varchar)
              WITH compression = { 'sstable_compressor' : 'DeflateCompressor' };
            """, expected=ConfigurationException)
    def keyspace_creation_options_test(self):
        """ Check one can use arbitrary name for datacenter when creating keyspace (#4278) """
        cursor = self.prepare()
        # we just want to make sure the following is valid
        # (1.2+ uses the replication map; older versions use the legacy
        # strategy_class / strategy_options form)
        if self.cluster.version() >= '1.2':
            cursor.execute("""
                CREATE KEYSPACE Foo
                    WITH replication = { 'class' : 'NetworkTopologyStrategy',
                                         'us-east' : 1,
                                         'us-west' : 1 };
            """)
        else:
            cursor.execute("""
                CREATE KEYSPACE Foo
                    WITH strategy_class='NetworkTopologyStrategy'
                     AND strategy_options:"us-east"=1
                     AND strategy_options:"us-west"=1;
            """)
    def set_test(self):
        """ set<text> operations: add/remove elements, full overwrite, element delete, column delete. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE user (
                fn text,
                ln text,
                tags set<text>,
                PRIMARY KEY (fn, ln)
            )
        """)
        # Additions are idempotent ('foo' added twice appears once) and
        # '-' removes elements.
        q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
        cursor.execute(q % "tags = tags + { 'foo' }")
        cursor.execute(q % "tags = tags + { 'bar' }")
        cursor.execute(q % "tags = tags + { 'foo' }")
        cursor.execute(q % "tags = tags + { 'foobar' }")
        cursor.execute(q % "tags = tags - { 'bar' }")
        res = cursor.execute("SELECT tags FROM user")
        assert rows_to_list(res) == [[set(['foo', 'foobar'])]], res
        q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
        cursor.execute(q % "tags = { 'a', 'c', 'b' }")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        assert rows_to_list(res) == [[set(['a', 'b', 'c'])]], res
        # NOTE(review): the sleep presumably guarantees the overwrite gets a
        # strictly later write timestamp — confirm.
        time.sleep(.01)
        cursor.execute(q % "tags = { 'm', 'n' }")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        assert rows_to_list(res) == [[set(['m', 'n'])]], res
        cursor.execute("DELETE tags['m'] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        assert rows_to_list(res) == [[set(['n'])]], res
        # Deleting the whole collection: pre-1.3 returns a row with a null
        # column; later versions return no row at all.
        cursor.execute("DELETE tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        if self.cluster.version() <= "1.2":
            assert rows_to_list(res) == [None], res
        else:
            assert rows_to_list(res) == [], res
    def map_test(self):
        """ map<text, int> operations: per-key set, key delete, full overwrite, empty assignment. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE user (
                fn text,
                ln text,
                m map<text, int>,
                PRIMARY KEY (fn, ln)
            )
        """)
        # Later writes to the same key win ('bar' ends up 6, not 4).
        q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
        cursor.execute(q % "m['foo'] = 3")
        cursor.execute(q % "m['bar'] = 4")
        cursor.execute(q % "m['woot'] = 5")
        cursor.execute(q % "m['bar'] = 6")
        cursor.execute("DELETE m['foo'] FROM user WHERE fn='Tom' AND ln='Bombadil'")
        res = cursor.execute("SELECT m FROM user")
        assert rows_to_list(res) == [[{'woot': 5, 'bar': 6}]], res
        q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
        cursor.execute(q % "m = { 'a' : 4 , 'c' : 3, 'b' : 2 }")
        res = cursor.execute("SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        assert rows_to_list(res) == [[{'a': 4, 'b': 2, 'c': 3}]], res
        # NOTE(review): the sleep presumably guarantees the overwrite gets a
        # strictly later write timestamp — confirm.
        time.sleep(.01)
        # Check we correctly overwrite
        cursor.execute(q % "m = { 'm' : 4 , 'n' : 1, 'o' : 2 }")
        res = cursor.execute("SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        assert rows_to_list(res) == [[{'m': 4, 'n': 1, 'o': 2}]], res
        # Assigning {} deletes the collection: pre-1.3 returns a row with a
        # null column; later versions return no row at all.
        cursor.execute(q % "m = {}")
        res = cursor.execute("SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        if self.cluster.version() <= "1.2":
            assert rows_to_list(res) == [None], res
        else:
            assert rows_to_list(res) == [], res
    def list_test(self):
        """ list<text> operations: append, prepend, set/delete by index, remove-by-value. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE user (
                fn text,
                ln text,
                tags list<text>,
                PRIMARY KEY (fn, ln)
            )
        """)
        # Unlike sets, lists keep duplicates and insertion order.
        q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
        cursor.execute(q % "tags = tags + [ 'foo' ]")
        cursor.execute(q % "tags = tags + [ 'bar' ]")
        cursor.execute(q % "tags = tags + [ 'foo' ]")
        cursor.execute(q % "tags = tags + [ 'foobar' ]")
        res = cursor.execute("SELECT tags FROM user")
        self.assertItemsEqual(rows_to_list(res), [[['foo', 'bar', 'foo', 'foobar']]])
        q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
        cursor.execute(q % "tags = [ 'a', 'c', 'b', 'c' ]")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        self.assertItemsEqual(rows_to_list(res), [[['a', 'c', 'b', 'c']]])
        # Prepend
        cursor.execute(q % "tags = [ 'm', 'n' ] + tags")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        self.assertItemsEqual(rows_to_list(res), [[['m', 'n', 'a', 'c', 'b', 'c']]])
        # Set and delete by index
        cursor.execute(q % "tags[2] = 'foo', tags[4] = 'bar'")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        self.assertItemsEqual(rows_to_list(res), [[['m', 'n', 'foo', 'c', 'bar', 'c']]])
        cursor.execute("DELETE tags[2] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        self.assertItemsEqual(rows_to_list(res), [[['m', 'n', 'c', 'bar', 'c']]])
        # '-' removes every occurrence of the value.
        cursor.execute(q % "tags = tags - [ 'bar' ]")
        res = cursor.execute("SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
        self.assertItemsEqual(rows_to_list(res), [[['m', 'n', 'c', 'c']]])
    def multi_collection_test(self):
        """ Update a list, a map and a set in the same row and read all three back. """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE foo(
                k uuid PRIMARY KEY,
                L list<int>,
                M map<text, int>,
                S set<int>
            );
        """)
        # Each collection gets an initial assignment followed by an append/add.
        cursor.execute("UPDATE ks.foo SET L = [1, 3, 5] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        cursor.execute("UPDATE ks.foo SET L = L + [7, 11, 13] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        cursor.execute("UPDATE ks.foo SET S = {1, 3, 5} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        cursor.execute("UPDATE ks.foo SET S = S + {7, 11, 13} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        cursor.execute("UPDATE ks.foo SET M = {'foo': 1, 'bar' : 3} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        cursor.execute("UPDATE ks.foo SET M = M + {'foobar' : 4} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
        res = cursor.execute("SELECT L, M, S FROM foo WHERE k = b017f48f-ae67-11e1-9096-005056c00008")
        self.assertItemsEqual(rows_to_list(res), [[
            [1, 3, 5, 7, 11, 13],
            OrderedDict([('bar', 3), ('foo', 1), ('foobar', 4)]),
            sortedset([1, 3, 5, 7, 11, 13])
        ]])
def range_query_test(self):
""" Range test query from #4372 """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (a int, b int, c int, d int, e int, f text, PRIMARY KEY (a, b, c, d, e) )")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 2, '2');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 2, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 3, '3');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 5, '5');")
res = cursor.execute("SELECT a, b, c, d, e, f FROM test WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 AND e >= 2;")
assert rows_to_list(res) == [[1, 1, 1, 1, 2, u'2'], [1, 1, 1, 1, 3, u'3'], [1, 1, 1, 1, 5, u'5']], res
    def update_type_test(self):
        """ Test altering the type of a column, including the one in the primary key (#4041) """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k text,
                c text,
                s set<text>,
                v text,
                PRIMARY KEY (k, c)
            )
        """)
        req = "INSERT INTO test (k, c, v, s) VALUES ('%s', '%s', '%s', {'%s'})"
        # using utf8 character so that we can see the transition to BytesType
        cursor.execute(req % ('ɸ', 'ɸ', 'ɸ', 'ɸ'))
        cursor.execute("SELECT * FROM test")
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [[u'ɸ', u'ɸ', set([u'ɸ']), u'ɸ']], res
        # After each ALTER ... TYPE blob the same bytes come back as a raw
        # str instead of a unicode string.
        cursor.execute("ALTER TABLE test ALTER v TYPE blob")
        res = cursor.execute("SELECT * FROM test")
        # the last should not be utf8 but a raw string
        assert rows_to_list(res) == [[u'ɸ', u'ɸ', set([u'ɸ']), 'ɸ']], res
        cursor.execute("ALTER TABLE test ALTER k TYPE blob")
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [['ɸ', u'ɸ', set([u'ɸ']), 'ɸ']], res
        cursor.execute("ALTER TABLE test ALTER c TYPE blob")
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [['ɸ', 'ɸ', set([u'ɸ']), 'ɸ']], res
        # Altering the element type of a collection is only allowed from 2.1.
        if self.cluster.version() < "2.1":
            assert_invalid(cursor, "ALTER TABLE test ALTER s TYPE set<blob>", expected=ConfigurationException)
        else:
            cursor.execute("ALTER TABLE test ALTER s TYPE set<blob>")
            res = cursor.execute("SELECT * FROM test")
            assert rows_to_list(res) == [['ɸ', 'ɸ', set(['ɸ']), 'ɸ']], res
    def composite_row_key_test(self):
        """ Queries against a composite partition key ((k1, k2), c). """
        cursor = self.prepare()
        cursor.execute("""
            CREATE TABLE test (
                k1 int,
                k2 int,
                c int,
                v int,
                PRIMARY KEY ((k1, k2), c)
            )
        """)
        req = "INSERT INTO test (k1, k2, c, v) VALUES (%d, %d, %d, %d)"
        for i in range(0, 4):
            cursor.execute(req % (0, i, i, i))
        # Unrestricted SELECT returns partitions in token order, not k2 order.
        res = cursor.execute("SELECT * FROM test")
        assert rows_to_list(res) == [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]], res
        res = cursor.execute("SELECT * FROM test WHERE k1 = 0 and k2 IN (1, 3)")
        assert rows_to_list(res) == [[0, 1, 1, 1], [0, 3, 3, 3]], res
        # Restricting only part of the composite partition key is invalid.
        assert_invalid(cursor, "SELECT * FROM test WHERE k2 = 3")
        v = self.cluster.version()
        if v < "3.0.0":
            assert_invalid(cursor, "SELECT * FROM test WHERE k1 IN (0, 1) and k2 = 3")
        # token() must take the full composite key.
        res = cursor.execute("SELECT * FROM test WHERE token(k1, k2) = token(0, 1)")
        assert rows_to_list(res) == [[0, 1, 1, 1]], res
        res = cursor.execute("SELECT * FROM test WHERE token(k1, k2) > " + str(-((2 ** 63) - 1)))
        assert rows_to_list(res) == [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]], res
def cql3_insert_thrift_test(self):
    """Verify a value written via the legacy thrift CLI is visible in a CQL3 table (#4377)."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int,
            c int,
            v int,
            PRIMARY KEY (k, c)
        )
    """)

    # Drive the same table through the thrift command-line interface.
    cli = self.cluster.nodelist()[0].cli()
    for command in ("use ks", "set test[2]['4:v'] = int(200)"):
        cli.do(command)
    assert not cli.has_errors(), cli.errors()

    # Give the thrift write a moment to become visible to CQL.
    time.sleep(1.5)
    rows = session.execute("SELECT * FROM test")
    assert rows_to_list(rows) == [[2, 4, 200]], rows
def row_existence_test(self):
    """Check the semantics of CQL row existence (part of #4361)."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            c int,
            v1 int,
            v2 int,
            PRIMARY KEY (k, c)
        )
    """)

    cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (1, 1, 1, 1)")
    res = cursor.execute("SELECT * FROM test")
    assert rows_to_list(res) == [[1, 1, 1, 1]], res

    # Primary-key components cannot be deleted individually.
    assert_invalid(cursor, "DELETE c FROM test WHERE k = 1 AND c = 1")

    # Deleting regular columns one by one leaves the row alive (with nulls)...
    cursor.execute("DELETE v2 FROM test WHERE k = 1 AND c = 1")
    res = cursor.execute("SELECT * FROM test")
    assert rows_to_list(res) == [[1, 1, 1, None]], res

    cursor.execute("DELETE v1 FROM test WHERE k = 1 AND c = 1")
    res = cursor.execute("SELECT * FROM test")
    assert rows_to_list(res) == [[1, 1, None, None]], res

    # ...while a full row delete removes it entirely.
    cursor.execute("DELETE FROM test WHERE k = 1 AND c = 1")
    res = cursor.execute("SELECT * FROM test")
    assert rows_to_list(res) == [], res

    # Inserting only the primary key still creates a live (all-null) row.
    cursor.execute("INSERT INTO test (k, c) VALUES (2, 2)")
    res = cursor.execute("SELECT * FROM test")
    assert rows_to_list(res) == [[2, 2, None, None]], res
@freshCluster()
def only_pk_test(self):
    """Tables made up solely of primary-key columns are fully usable (#4361)."""
    session = self.prepare(ordered=True)
    expected = [[x, y] for x in range(0, 2) for y in range(0, 2)]

    session.execute("""
        CREATE TABLE test (
            k int,
            c int,
            PRIMARY KEY (k, c)
        )
    """)
    insert = "INSERT INTO test (k, c) VALUES (%d, %d)"
    for pk in range(0, 2):
        for ck in range(0, 2):
            session.execute(insert % (pk, ck))
    rows = session.execute("SELECT * FROM test")
    assert rows_to_list(rows) == expected, rows

    # Run the same check against a dense (COMPACT STORAGE) table.
    session.execute("""
        CREATE TABLE test2 (
            k int,
            c int,
            PRIMARY KEY (k, c)
        ) WITH COMPACT STORAGE
    """)
    insert = "INSERT INTO test2 (k, c) VALUES (%d, %d)"
    for pk in range(0, 2):
        for ck in range(0, 2):
            session.execute(insert % (pk, ck))
    rows = session.execute("SELECT * FROM test2")
    assert rows_to_list(rows) == expected, rows
def date_test(self):
    """Timestamps: a well-formed date is accepted, a nonsense month/day is rejected."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            t timestamp
        )
    """)

    session.execute("INSERT INTO test (k, t) VALUES (0, '2011-02-03')")
    # Month 42 / day 42 does not exist, so validation must fail.
    assert_invalid(session, "INSERT INTO test (k, t) VALUES (0, '2011-42-42')")
@freshCluster()
def range_slice_test(self):
    """Test a regression from #1337: range scans across a two-node cluster."""
    # Two nodes so the unrestricted scan actually spans ranges.
    cluster = self.cluster

    cluster.populate(2).start()
    node1 = cluster.nodelist()[0]
    time.sleep(0.2)

    cursor = self.patient_cql_connection(node1, version=cql_version)
    self.create_ks(cursor, 'ks', 1)
    cursor.execute("""
        CREATE TABLE test (
            k text PRIMARY KEY,
            v int
        );
    """)
    time.sleep(1)

    cursor.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
    cursor.execute("INSERT INTO test (k, v) VALUES ('bar', 1)")

    res = cursor.execute("SELECT * FROM test")
    # Both rows must come back from the unrestricted scan.
    assert len(res) == 2, res
@freshCluster()
def composite_index_with_pk_test(self):
    """Secondary index on a table with a composite primary key.

    Combining the indexed-column restriction with clustering-column
    restrictions requires ALLOW FILTERING; the version branch at the end
    tracks the CASSANDRA-8148 behavior change between 2.x and 3.0.
    """
    cursor = self.prepare(ordered=True)
    cursor.execute("""
        CREATE TABLE blogs (
            blog_id int,
            time1 int,
            time2 int,
            author text,
            content text,
            PRIMARY KEY (blog_id, time1, time2)
        )
    """)

    cursor.execute("CREATE INDEX ON blogs(author)")

    req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', '%s')"
    cursor.execute(req % (1, 0, 0, 'foo', 'bar1'))
    cursor.execute(req % (1, 0, 1, 'foo', 'bar2'))
    cursor.execute(req % (2, 1, 0, 'foo', 'baz'))
    cursor.execute(req % (3, 0, 1, 'gux', 'qux'))

    res = cursor.execute("SELECT blog_id, content FROM blogs WHERE author='foo'")
    assert rows_to_list(res) == [[1, 'bar1'], [1, 'bar2'], [2, 'baz']], res

    # Index lookups narrowed by clustering columns need ALLOW FILTERING.
    res = cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo' ALLOW FILTERING")
    assert rows_to_list(res) == [[2, 'baz']], res

    res = cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo' ALLOW FILTERING")
    assert rows_to_list(res) == [[2, 'baz']], res

    res = cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo' ALLOW FILTERING")
    assert rows_to_list(res) == [[2, 'baz']], res

    res = cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo' ALLOW FILTERING")
    assert rows_to_list(res) == [], res

    res = cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo' ALLOW FILTERING")
    assert rows_to_list(res) == [], res

    assert_invalid(cursor, "SELECT content FROM blogs WHERE time2 >= 0 AND author='foo'")

    # as discussed in CASSANDRA-8148, some queries that should have required ALLOW FILTERING
    # in 2.0 have been fixed for 3.0
    v = self.cluster.version()
    if v < "3.0.0":
        cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
        cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
        cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
        cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
        cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
    else:
        assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
        assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
        assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
        assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
        assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
@freshCluster()
def limit_bugs_test(self):
    """Test for LIMIT bugs from #4579."""
    cursor = self.prepare(ordered=True)
    cursor.execute("""
        CREATE TABLE testcf (
            a int,
            b int,
            c int,
            d int,
            e int,
            PRIMARY KEY (a, b)
        );
    """)

    cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (1, 1, 1, 1, 1);")
    cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (2, 2, 2, 2, 2);")
    cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (3, 3, 3, 3, 3);")
    cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (4, 4, 4, 4, 4);")

    res = cursor.execute("SELECT * FROM testcf;")
    assert rows_to_list(res) == [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], res

    # The trailing comments below describe the buggy pre-fix behavior of #4579.
    res = cursor.execute("SELECT * FROM testcf LIMIT 1;")  # columns d and e in result row are null
    assert rows_to_list(res) == [[1, 1, 1, 1, 1]], res

    res = cursor.execute("SELECT * FROM testcf LIMIT 2;")  # columns d and e in last result row are null
    assert rows_to_list(res) == [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]], res

    # Same checks on a table with only a simple (non-composite) primary key.
    cursor.execute("""
        CREATE TABLE testcf2 (
            a int primary key,
            b int,
            c int,
        );
    """)

    cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (1, 1, 1);")
    cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (2, 2, 2);")
    cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (3, 3, 3);")
    cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (4, 4, 4);")

    res = cursor.execute("SELECT * FROM testcf2;")
    assert rows_to_list(res) == [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]], res

    res = cursor.execute("SELECT * FROM testcf2 LIMIT 1;")  # gives 1 row
    assert rows_to_list(res) == [[1, 1, 1]], res

    res = cursor.execute("SELECT * FROM testcf2 LIMIT 2;")  # gives 1 row
    assert rows_to_list(res) == [[1, 1, 1], [2, 2, 2]], res

    res = cursor.execute("SELECT * FROM testcf2 LIMIT 3;")  # gives 2 rows
    assert rows_to_list(res) == [[1, 1, 1], [2, 2, 2], [3, 3, 3]], res

    res = cursor.execute("SELECT * FROM testcf2 LIMIT 4;")  # gives 2 rows
    assert rows_to_list(res) == [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]], res

    res = cursor.execute("SELECT * FROM testcf2 LIMIT 5;")  # gives 3 rows
    assert rows_to_list(res) == [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]], res
def bug_4532_test(self):
    """Conflicting/duplicate restrictions on a clustering column are rejected (#4532)."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE compositetest(
            status ascii,
            ctime bigint,
            key ascii,
            nil ascii,
            PRIMARY KEY (status, ctime, key)
        )
    """)

    seed_rows = [(12345678, 'key1'), (12345678, 'key2'),
                 (12345679, 'key3'), (12345679, 'key4'),
                 (12345679, 'key5'), (12345680, 'key6')]
    for ctime, key in seed_rows:
        session.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',%d,'%s','')" % (ctime, key))

    # Skipping a clustering level (restricting key without pinning ctime to one
    # value) and double-restricting ctime must both be invalid.
    assert_invalid(session, "SELECT * FROM compositetest WHERE ctime>=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3;")
    assert_invalid(session, "SELECT * FROM compositetest WHERE ctime=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3")
@freshCluster()
def order_by_multikey_test(self):
    """Test for #4612 bug and more generally ORDER BY when multiple C* rows are queried."""
    cursor = self.prepare(ordered=True)
    # Disable paging so ordering is checked over the full result set at once.
    cursor.default_fetch_size = None
    cursor.execute("""
        CREATE TABLE test(
            my_id varchar,
            col1 int,
            col2 int,
            value varchar,
            PRIMARY KEY (my_id, col1, col2)
        );
    """)

    cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key1', 1, 1, 'a');")
    cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key2', 3, 3, 'a');")
    cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key3', 2, 2, 'b');")
    cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key4', 2, 1, 'b');")

    res = cursor.execute("SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1;")
    assert rows_to_list(res) == [[1], [2], [3]], res

    res = cursor.execute("SELECT col1, value, my_id, col2 FROM test WHERE my_id in('key3', 'key4') ORDER BY col1, col2;")
    assert rows_to_list(res) == [[2, 'b', 'key4', 1], [2, 'b', 'key3', 2]], res

    # ORDER BY requires the partition key to be restricted by EQ or IN.
    assert_invalid(cursor, "SELECT col1 FROM test ORDER BY col1;")
    assert_invalid(cursor, "SELECT col1 FROM test WHERE my_id > 'key1' ORDER BY col1;")
@freshCluster()
def create_alter_options_test(self):
    """CREATE/ALTER KEYSPACE and table-option validation.

    Verifies replication/durable_writes options are required, persisted in
    the system schema tables, and updatable via ALTER; also checks compaction
    option validation on CREATE TABLE.
    """
    cursor = self.prepare(create_keyspace=False)

    # Replication options are mandatory and must include 'class'.
    assert_invalid(cursor, "CREATE KEYSPACE ks1", expected=SyntaxException)
    assert_invalid(cursor, "CREATE KEYSPACE ks1 WITH replication= { 'replication_factor' : 1 }", expected=ConfigurationException)

    cursor.execute("CREATE KEYSPACE ks1 WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
    cursor.execute("CREATE KEYSPACE ks2 WITH replication={ 'class' : 'SimpleStrategy', 'replication_factor' : 1 } AND durable_writes=false")

    # 3.0+ clusters also list the system_auth keyspace.
    if self.cluster.version() >= '3.0':
        assert_all(cursor, "SELECT keyspace_name, durable_writes FROM system.schema_keyspaces",
                   [['system_auth', True], ['ks1', True], ['system', True], ['system_traces', True], ['ks2', False]])
    else:
        assert_all(cursor, "SELECT keyspace_name, durable_writes FROM system.schema_keyspaces",
                   [['ks1', True], ['system', True], ['system_traces', True], ['ks2', False]])

    cursor.execute("ALTER KEYSPACE ks1 WITH replication = { 'class' : 'NetworkTopologyStrategy', 'dc1' : 1 } AND durable_writes=False")
    cursor.execute("ALTER KEYSPACE ks2 WITH durable_writes=true")

    if self.cluster.version() >= '3.0':
        assert_all(cursor, "SELECT keyspace_name, durable_writes, strategy_class FROM system.schema_keyspaces",
                   [[u'system_auth', True, u'org.apache.cassandra.locator.SimpleStrategy'],
                    [u'ks1', False, u'org.apache.cassandra.locator.NetworkTopologyStrategy'],
                    [u'system', True, u'org.apache.cassandra.locator.LocalStrategy'],
                    [u'system_traces', True, u'org.apache.cassandra.locator.SimpleStrategy'],
                    [u'ks2', True, u'org.apache.cassandra.locator.SimpleStrategy']])
    else:
        assert_all(cursor, "SELECT keyspace_name, durable_writes, strategy_class FROM system.schema_keyspaces",
                   [[u'ks1', False, u'org.apache.cassandra.locator.NetworkTopologyStrategy'],
                    [u'system', True, u'org.apache.cassandra.locator.LocalStrategy'],
                    [u'system_traces', True, u'org.apache.cassandra.locator.SimpleStrategy'],
                    [u'ks2', True, u'org.apache.cassandra.locator.SimpleStrategy']])

    cursor.execute("USE ks1")

    # Compaction options also require a 'class' entry.
    assert_invalid(cursor, "CREATE TABLE cf1 (a int PRIMARY KEY, b int) WITH compaction = { 'min_threshold' : 4 }", expected=ConfigurationException)
    cursor.execute("CREATE TABLE cf1 (a int PRIMARY KEY, b int) WITH compaction = { 'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 7 }")
    assert_one(cursor, "SELECT columnfamily_name, min_compaction_threshold FROM system.schema_columnfamilies WHERE keyspace_name='ks1'", ['cf1', 7])
def remove_range_slice_test(self):
    """A deleted row must not reappear in an unrestricted range scan."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v int
        )
    """)

    for key in range(0, 3):
        session.execute("INSERT INTO test (k, v) VALUES (%d, %d)" % (key, key))

    session.execute("DELETE FROM test WHERE k = 1")
    remaining = session.execute("SELECT * FROM test")
    assert rows_to_list(remaining) == [[0, 0], [2, 2]], remaining
def indexes_composite_test(self):
    """Secondary index on a clustered table: index rebuild on creation,
    updates after new writes, and cleanup after deletes."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            blog_id int,
            timestamp int,
            author text,
            content text,
            PRIMARY KEY (blog_id, timestamp)
        )
    """)

    req = "INSERT INTO test (blog_id, timestamp, author, content) VALUES (%d, %d, '%s', '%s')"
    cursor.execute(req % (0, 0, "bob", "1st post"))
    cursor.execute(req % (0, 1, "tom", "2nd post"))
    cursor.execute(req % (0, 2, "bob", "3rd post"))
    cursor.execute(req % (0, 3, "tom", "4nd post"))
    cursor.execute(req % (1, 0, "bob", "5th post"))

    # Index created after the writes: pre-existing rows must be indexed too.
    cursor.execute("CREATE INDEX ON test(author)")
    time.sleep(1)  # give the index build time to complete

    res = cursor.execute("SELECT blog_id, timestamp FROM test WHERE author = 'bob'")
    assert rows_to_list(res) == [[1, 0], [0, 0], [0, 2]], res

    # New writes after the index exists must also be visible.
    cursor.execute(req % (1, 1, "tom", "6th post"))
    cursor.execute(req % (1, 2, "tom", "7th post"))
    cursor.execute(req % (1, 3, "bob", "8th post"))

    res = cursor.execute("SELECT blog_id, timestamp FROM test WHERE author = 'bob'")
    assert rows_to_list(res) == [[1, 0], [1, 3], [0, 0], [0, 2]], res

    # Deleting a row must remove it from the index results.
    cursor.execute("DELETE FROM test WHERE blog_id = 0 AND timestamp = 2")

    res = cursor.execute("SELECT blog_id, timestamp FROM test WHERE author = 'bob'")
    assert rows_to_list(res) == [[1, 0], [1, 3], [0, 0]], res
def refuse_in_with_indexes_test(self):
    """IN restrictions on a secondary-indexed column are rejected (#4709)."""
    session = self.prepare()
    session.execute("create table t1 (pk varchar primary key, col1 varchar, col2 varchar);")
    session.execute("create index t1_c1 on t1(col1);")
    session.execute("create index t1_c2 on t1(col2);")

    seed_rows = [('pk1', 'foo1', 'bar1'), ('pk1a', 'foo1', 'bar1'),
                 ('pk1b', 'foo1', 'bar1'), ('pk1c', 'foo1', 'bar1'),
                 ('pk2', 'foo2', 'bar2'), ('pk3', 'foo3', 'bar3')]
    for pk, col1, col2 in seed_rows:
        session.execute("insert into t1 (pk, col1, col2) values ('%s','%s','%s');" % (pk, col1, col2))

    # IN is only supported on key columns, not on indexed regular columns.
    assert_invalid(session, "select * from t1 where col2 in ('bar1', 'bar2');")
def validate_counter_regular_test(self):
    """Counter columns cannot be mixed with regular columns in one table (#4706)."""
    session = self.prepare()
    assert_invalid(session,
                   "CREATE TABLE test (id bigint PRIMARY KEY, count counter, things set<text>)",
                   matching="Cannot add a counter column",
                   expected=ConfigurationException)
def reversed_compact_test(self):
    """Test for #4716 bug and more generally for good behavior of ordering.

    Compares slice results on two compact tables: one with reversed
    (DESC) clustering order and one with the default (ASC) order.
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test1 (
            k text,
            c int,
            v int,
            PRIMARY KEY (k, c)
        ) WITH COMPACT STORAGE
          AND CLUSTERING ORDER BY (c DESC);
    """)

    for i in range(0, 10):
        cursor.execute("INSERT INTO test1(k, c, v) VALUES ('foo', %i, %i)" % (i, i))

    # With DESC clustering the natural result order is descending.
    res = cursor.execute("SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo'")
    assert rows_to_list(res) == [[5], [4], [3]], res

    res = cursor.execute("SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo'")
    assert rows_to_list(res) == [[6], [5], [4], [3], [2]], res

    res = cursor.execute("SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC")
    assert rows_to_list(res) == [[3], [4], [5]], res

    res = cursor.execute("SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC")
    assert rows_to_list(res) == [[2], [3], [4], [5], [6]], res

    res = cursor.execute("SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC")
    assert rows_to_list(res) == [[5], [4], [3]], res

    res = cursor.execute("SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC")
    assert rows_to_list(res) == [[6], [5], [4], [3], [2]], res

    cursor.execute("""
        CREATE TABLE test2 (
            k text,
            c int,
            v int,
            PRIMARY KEY (k, c)
        ) WITH COMPACT STORAGE;
    """)

    for i in range(0, 10):
        cursor.execute("INSERT INTO test2(k, c, v) VALUES ('foo', %i, %i)" % (i, i))

    # Default clustering order: ascending results unless ORDER BY ... DESC.
    res = cursor.execute("SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo'")
    assert rows_to_list(res) == [[3], [4], [5]], res

    res = cursor.execute("SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo'")
    assert rows_to_list(res) == [[2], [3], [4], [5], [6]], res

    res = cursor.execute("SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC")
    assert rows_to_list(res) == [[3], [4], [5]], res

    res = cursor.execute("SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC")
    assert rows_to_list(res) == [[2], [3], [4], [5], [6]], res

    res = cursor.execute("SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC")
    assert rows_to_list(res) == [[5], [4], [3]], res

    res = cursor.execute("SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC")
    assert rows_to_list(res) == [[6], [5], [4], [3], [2]], res
def unescaped_string_test(self):
    """An unescaped single quote inside a CQL string literal must be a syntax error."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k text PRIMARY KEY,
            c text,
        )
    """)

    # The \ in this query string is not forwarded to cassandra.
    # The ' is being escaped in python, but only ' is forwarded
    # over the wire instead of \'.
    assert_invalid(cursor, "INSERT INTO test (k, c) VALUES ('foo', 'CQL is cassandra\'s best friend')", expected=SyntaxException)
def reversed_compact_multikey_test(self):
    """Test for the bug from #4760 and #4759.

    Compact table with two clustering columns, both reversed (DESC);
    checks result ordering for =, >, >=, < and <= restrictions, with and
    without explicit ORDER BY.
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            key text,
            c1 int,
            c2 int,
            value text,
            PRIMARY KEY(key, c1, c2)
        ) WITH COMPACT STORAGE
          AND CLUSTERING ORDER BY(c1 DESC, c2 DESC);
    """)

    for i in range(0, 3):
        for j in range(0, 3):
            cursor.execute("INSERT INTO test(key, c1, c2, value) VALUES ('foo', %i, %i, 'bar');" % (i, j))

    # Equalities
    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1")
    assert rows_to_list(res) == [[1, 2], [1, 1], [1, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 ASC, c2 ASC")
    assert rows_to_list(res) == [[1, 0], [1, 1], [1, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 DESC, c2 DESC")
    assert rows_to_list(res) == [[1, 2], [1, 1], [1, 0]], res

    # GT
    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1")
    assert rows_to_list(res) == [[2, 2], [2, 1], [2, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 ASC, c2 ASC")
    assert rows_to_list(res) == [[2, 0], [2, 1], [2, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 DESC, c2 DESC")
    assert rows_to_list(res) == [[2, 2], [2, 1], [2, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1")
    assert rows_to_list(res) == [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC, c2 ASC")
    assert rows_to_list(res) == [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]], res

    # Ordering by c1 alone implies ascending c2 as well on a reversed table.
    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC")
    assert rows_to_list(res) == [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 DESC, c2 DESC")
    assert rows_to_list(res) == [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]], res

    # LT
    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1")
    assert rows_to_list(res) == [[0, 2], [0, 1], [0, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 ASC, c2 ASC")
    assert rows_to_list(res) == [[0, 0], [0, 1], [0, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 DESC, c2 DESC")
    assert rows_to_list(res) == [[0, 2], [0, 1], [0, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1")
    assert rows_to_list(res) == [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC, c2 ASC")
    assert rows_to_list(res) == [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC")
    assert rows_to_list(res) == [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC")
    assert rows_to_list(res) == [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]], res
def collection_and_regular_test(self):
    """A single UPDATE can set both a list element and a regular column."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            l list<int>,
            c int
        )
    """)

    session.execute("INSERT INTO test(k, l, c) VALUES(3, [0, 1, 2], 4)")
    session.execute("UPDATE test SET l[0] = 1, c = 42 WHERE k = 3")
    rows = session.execute("SELECT l, c FROM test WHERE k = 3")
    self.assertItemsEqual(rows_to_list(rows), [[[1, 1, 2], 42]])
def batch_and_list_test(self):
    """List append/prepend order is preserved inside a batch."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            l list<int>
        )
    """)

    # Appends within a batch apply in statement order.
    session.execute("""
        BEGIN BATCH
        UPDATE test SET l = l + [ 1 ] WHERE k = 0;
        UPDATE test SET l = l + [ 2 ] WHERE k = 0;
        UPDATE test SET l = l + [ 3 ] WHERE k = 0;
        APPLY BATCH
    """)
    rows = session.execute("SELECT l FROM test WHERE k = 0")
    self.assertItemsEqual(rows_to_list(rows[0]), [[1, 2, 3]])

    # Prepends within a batch end up in reverse statement order.
    session.execute("""
        BEGIN BATCH
        UPDATE test SET l = [ 1 ] + l WHERE k = 1;
        UPDATE test SET l = [ 2 ] + l WHERE k = 1;
        UPDATE test SET l = [ 3 ] + l WHERE k = 1;
        APPLY BATCH
    """)
    rows = session.execute("SELECT l FROM test WHERE k = 1")
    self.assertItemsEqual(rows_to_list(rows[0]), [[3, 2, 1]])
def boolean_test(self):
    """Booleans work both as partition key and as a regular column."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k boolean PRIMARY KEY,
            b boolean
        )
    """)

    session.execute("INSERT INTO test (k, b) VALUES (true, false)")
    rows = session.execute("SELECT * FROM test WHERE k = true")
    assert rows_to_list(rows) == [[True, False]], rows
def multiordering_test(self):
    """Mixed clustering order (c1 ASC, c2 DESC): natural order, allowed and
    disallowed ORDER BY combinations."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k text,
            c1 int,
            c2 int,
            PRIMARY KEY (k, c1, c2)
        ) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
    """)

    for i in range(0, 2):
        for j in range(0, 2):
            cursor.execute("INSERT INTO test(k, c1, c2) VALUES ('foo', %i, %i)" % (i, j))

    # Natural order follows the declared clustering order.
    res = cursor.execute("SELECT c1, c2 FROM test WHERE k = 'foo'")
    assert rows_to_list(res) == [[0, 1], [0, 0], [1, 1], [1, 0]], res

    # ORDER BY is only allowed in the declared order or its exact reverse.
    res = cursor.execute("SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 DESC")
    assert rows_to_list(res) == [[0, 1], [0, 0], [1, 1], [1, 0]], res

    res = cursor.execute("SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 DESC, c2 ASC")
    assert rows_to_list(res) == [[1, 0], [1, 1], [0, 0], [0, 1]], res

    # Ordering by c2 alone, or in a non-reversible combination, is invalid.
    assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 DESC")
    assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 ASC")
    assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 ASC")
def multiordering_validation_test(self):
    """CLUSTERING ORDER BY must name every clustering column, in declaration order."""
    session = self.prepare()

    # Missing column, wrong column order, and unknown column are each rejected.
    bad_creates = (
        "CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2)) WITH CLUSTERING ORDER BY (c2 DESC)",
        "CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2)) WITH CLUSTERING ORDER BY (c2 ASC, c1 DESC)",
        "CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2)) WITH CLUSTERING ORDER BY (c1 DESC, c2 DESC, c3 DESC)",
    )
    for statement in bad_creates:
        assert_invalid(session, statement)

    # Complete specifications, in either direction, are accepted.
    session.execute("CREATE TABLE test1 (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2)) WITH CLUSTERING ORDER BY (c1 DESC, c2 DESC)")
    session.execute("CREATE TABLE test2 (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2)) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC)")
def bug_4882_test(self):
    """LIMIT over mixed ASC/DESC clustering order returns the right first row (#4882)."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int,
            c1 int,
            c2 int,
            v int,
            PRIMARY KEY (k, c1, c2)
        ) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
    """)

    for c1, c2, v in [(0, 0, 0), (1, 1, 1), (0, 2, 2), (1, 3, 3)]:
        session.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, %d, %d, %d);" % (c1, c2, v))

    # First row in (c1 ASC, c2 DESC) order is (c1=0, c2=2).
    rows = session.execute("select * from test where k = 0 limit 1;")
    assert rows_to_list(rows) == [[0, 0, 2, 2]], rows
def multi_list_set_test(self):
    """One UPDATE may set elements of two different list columns."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            l1 list<int>,
            l2 list<int>
        )
    """)

    session.execute("INSERT INTO test (k, l1, l2) VALUES (0, [1, 2, 3], [4, 5, 6])")
    session.execute("UPDATE test SET l2[1] = 42, l1[1] = 24 WHERE k = 0")
    rows = session.execute("SELECT l1, l2 FROM test WHERE k = 0")
    self.assertItemsEqual(rows_to_list(rows), [[[1, 24, 3], [4, 42, 6]]])
@freshCluster()
def composite_index_collections_test(self):
    """Secondary-index lookup on a table whose regular column is a set collection."""
    cursor = self.prepare(ordered=True)
    cursor.execute("""
        CREATE TABLE blogs (
            blog_id int,
            time1 int,
            time2 int,
            author text,
            content set<text>,
            PRIMARY KEY (blog_id, time1, time2)
        )
    """)

    cursor.execute("CREATE INDEX ON blogs(author)")

    req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', %s)"
    cursor.execute(req % (1, 0, 0, 'foo', "{ 'bar1', 'bar2' }"))
    cursor.execute(req % (1, 0, 1, 'foo', "{ 'bar2', 'bar3' }"))
    cursor.execute(req % (2, 1, 0, 'foo', "{ 'baz' }"))
    cursor.execute(req % (3, 0, 1, 'gux', "{ 'qux' }"))

    # Set columns come back as Python sets in the index query results.
    res = cursor.execute("SELECT blog_id, content FROM blogs WHERE author='foo'")
    assert rows_to_list(res) == [[1, set(['bar1', 'bar2'])], [1, set(['bar2', 'bar3'])], [2, set(['baz'])]], res
@freshCluster()
def truncate_clean_cache_test(self):
    """TRUNCATE must invalidate the row cache along with the data."""
    session = self.prepare(ordered=True, use_cache=True)
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v1 int,
            v2 int,
        ) WITH CACHING = ALL;
    """)

    for key in range(0, 3):
        session.execute("INSERT INTO test(k, v1, v2) VALUES (%d, %d, %d)" % (key, key, key * 2))

    # First read populates the row cache.
    rows = session.execute("SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)")
    assert rows_to_list(rows) == [[0, 0], [1, 2], [2, 4]], rows

    # After TRUNCATE, the cached rows must be gone too.
    session.execute("TRUNCATE test")
    rows = session.execute("SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)")
    assert rows_to_list(rows) == [], rows
def allow_filtering_test(self):
    """Which queries require ALLOW FILTERING, on clustered and indexed tables."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            c int,
            v int,
            PRIMARY KEY (k, c)
        )
    """)

    for i in range(0, 3):
        for j in range(0, 3):
            cursor.execute("INSERT INTO test(k, c, v) VALUES(%d, %d, %d)" % (i, j, j))

    # Don't require filtering, always allowed
    queries = ["SELECT * FROM test WHERE k = 1",
               "SELECT * FROM test WHERE k = 1 AND c > 2",
               "SELECT * FROM test WHERE k = 1 AND c = 2"]
    for q in queries:
        cursor.execute(q)
        cursor.execute(q + " ALLOW FILTERING")

    # Require filtering, allowed only with ALLOW FILTERING
    queries = ["SELECT * FROM test WHERE c = 2",
               "SELECT * FROM test WHERE c > 2 AND c <= 4"]
    for q in queries:
        assert_invalid(cursor, q)
        cursor.execute(q + " ALLOW FILTERING")

    cursor.execute("""
        CREATE TABLE indexed (
            k int PRIMARY KEY,
            a int,
            b int,
        )
    """)

    cursor.execute("CREATE INDEX ON indexed(a)")

    for i in range(0, 5):
        cursor.execute("INSERT INTO indexed(k, a, b) VALUES(%d, %d, %d)" % (i, i * 10, i * 100))

    # Don't require filtering, always allowed
    queries = ["SELECT * FROM indexed WHERE k = 1",
               "SELECT * FROM indexed WHERE a = 20"]
    for q in queries:
        cursor.execute(q)
        cursor.execute(q + " ALLOW FILTERING")

    # Require filtering, allowed only with ALLOW FILTERING
    # (index on a, but b is unindexed, so the extra restriction filters)
    queries = ["SELECT * FROM indexed WHERE a = 20 AND b = 200"]
    for q in queries:
        assert_invalid(cursor, q)
        cursor.execute(q + " ALLOW FILTERING")
def range_with_deletes_test(self):
    """A LIMITed range scan must skip deleted rows and still fill the limit.

    Inserts nb_keys rows, deletes a random subset, then checks that
    SELECT ... LIMIT nb_keys//2 still returns exactly that many rows
    (possible because nb_keys - nb_deletes > nb_keys // 2).
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v int,
        )
    """)

    nb_keys = 30
    nb_deletes = 5

    for i in range(0, nb_keys):
        cursor.execute("INSERT INTO test(k, v) VALUES (%d, %d)" % (i, i))

    # range() (not Py2-only xrange) keeps this runnable on both Python 2 and 3.
    for i in random.sample(range(nb_keys), nb_deletes):
        cursor.execute("DELETE FROM test WHERE k = %d" % i)

    # Floor division: plain '/' would yield a float under Python 3 or
    # 'from __future__ import division', breaking the %d format and the
    # exact equality check below.
    limit = nb_keys // 2
    res = cursor.execute("SELECT * FROM test LIMIT %d" % limit)
    assert len(res) == limit, "Expected %d but got %d" % (limit, len(res))
def alter_with_collections_test(self):
    """Columns (plain and collection) can be added to a table that already holds a collection (#4982)."""
    session = self.prepare()
    session.execute("CREATE TABLE collections (key int PRIMARY KEY, aset set<text>)")
    for alteration in ("ALTER TABLE collections ADD c text",
                       "ALTER TABLE collections ADD alist list<text>"):
        session.execute(alteration)
def collection_compact_test(self):
    """Collection columns are not allowed on COMPACT STORAGE tables."""
    session = self.prepare()
    assert_invalid(session, """
        CREATE TABLE test (
            user ascii PRIMARY KEY,
            mails list<text>
        ) WITH COMPACT STORAGE;
    """)
def collection_function_test(self):
    """ttl() and writetime() cannot be applied to collection columns."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            l set<int>
        )
    """)

    for query in ("SELECT ttl(l) FROM test WHERE k = 0",
                  "SELECT writetime(l) FROM test WHERE k = 0"):
        assert_invalid(session, query)
def collection_counter_test(self):
    """Counters are not allowed inside any collection type (list, set, map)."""
    session = self.prepare()
    for counter_column in ("l list<counter>", "s set<counter>", "m map<text, counter>"):
        assert_invalid(session, """
            CREATE TABLE test (
                k int PRIMARY KEY,
                %s
            )
        """ % counter_column, expected=(InvalidRequest, SyntaxException))
def composite_partition_key_validation_test(self):
    """Restricting only part of a composite partition key is rejected (#5122)."""
    session = self.prepare()
    session.execute("CREATE TABLE foo (a int, b text, c uuid, PRIMARY KEY ((a, b)));")

    session.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'aze', 4d481800-4c5f-11e1-82e0-3f484de45426)")
    session.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'ert', 693f5800-8acb-11e3-82e0-3f484de45426)")
    session.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'opl', d4815800-2d8d-11e0-82e0-3f484de45426)")

    rows = session.execute("SELECT * FROM foo")
    assert len(rows) == 3, rows

    # 'a' alone is only half of the ((a, b)) partition key.
    assert_invalid(session, "SELECT * FROM foo WHERE a=1")
@since('3.0')
def multi_in_test(self):
    """Multi-column IN restrictions on a regular (non-compact) table."""
    self.__multi_in(False)
@since('3.0')
def multi_in_compact_test(self):
    """Multi-column IN restrictions on a COMPACT STORAGE table."""
    self.__multi_in(True)
def __multi_in(self, compact):
    """Shared driver for the multi_in tests.

    Builds a zipcodes table (optionally COMPACT STORAGE), loads two groups
    of eight rows each, and checks IN restrictions across several clustering
    columns, combined with ORDER BY and inequality restrictions.

    :param compact: when True, create the table WITH COMPACT STORAGE.
    """
    cursor = self.prepare()

    # NOTE: the California FIPS code was previously written as the literal
    # `06`, a leading-zero octal literal — equal to 6 on Python 2 but a
    # SyntaxError on Python 3 (PEP 3127). Written as plain 6 here; the
    # value stored in fips_regions is unchanged.
    data = [
        ('test', '06029', 'CT', 9, 'Ellington'),
        ('test', '06031', 'CT', 9, 'Falls Village'),
        ('test', '06902', 'CT', 9, 'Stamford'),
        ('test', '06927', 'CT', 9, 'Stamford'),
        ('test', '10015', 'NY', 36, 'New York'),
        ('test', '07182', 'NJ', 34, 'Newark'),
        ('test', '73301', 'TX', 48, 'Austin'),
        ('test', '94102', 'CA', 6, 'San Francisco'),

        ('test2', '06029', 'CT', 9, 'Ellington'),
        ('test2', '06031', 'CT', 9, 'Falls Village'),
        ('test2', '06902', 'CT', 9, 'Stamford'),
        ('test2', '06927', 'CT', 9, 'Stamford'),
        ('test2', '10015', 'NY', 36, 'New York'),
        ('test2', '07182', 'NJ', 34, 'Newark'),
        ('test2', '73301', 'TX', 48, 'Austin'),
        ('test2', '94102', 'CA', 6, 'San Francisco'),
    ]

    create = """
        CREATE TABLE zipcodes (
            group text,
            zipcode text,
            state text,
            fips_regions int,
            city text,
            PRIMARY KEY(group,zipcode,state,fips_regions)
        )"""

    if compact:
        create = create + " WITH COMPACT STORAGE"

    cursor.execute(create)
    for d in data:
        cursor.execute("INSERT INTO zipcodes (group, zipcode, state, fips_regions, city) VALUES ('%s', '%s', '%s', %i, '%s')" % d)

    res = cursor.execute("select zipcode from zipcodes")
    assert len(res) == 16, res

    res = cursor.execute("select zipcode from zipcodes where group='test'")
    assert len(res) == 8, res

    # zipcode is a clustering column; restricting it without the partition
    # key needs ALLOW FILTERING.
    assert_invalid(cursor, "select zipcode from zipcodes where zipcode='06902'")

    res = cursor.execute("select zipcode from zipcodes where zipcode='06902' ALLOW FILTERING")
    assert len(res) == 2, res

    res = cursor.execute("select zipcode from zipcodes where group='test' and zipcode='06902'")
    assert len(res) == 1, res

    res = cursor.execute("select zipcode from zipcodes where group='test' and zipcode IN ('06902','73301','94102')")
    assert len(res) == 3, res

    res = cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA')")
    assert len(res) == 2, res

    res = cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions = 9")
    assert len(res) == 1, res

    res = cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') ORDER BY zipcode DESC")
    assert len(res) == 2, res

    res = cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions > 0")
    assert len(res) == 2, res

    res = cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions < 0")
    assert len(res) == 0, res
@since('3.0')
def multi_in_compact_non_composite_test(self):
    """IN restriction on the single clustering column of a COMPACT STORAGE
    table with a non-composite comparator returns exactly the listed rows."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            key int,
            c int,
            v int,
            PRIMARY KEY (key, c)
        ) WITH COMPACT STORAGE
    """)
    cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 0, 0)")
    cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 1, 1)")
    cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 2, 2)")
    # Only the c=0 and c=2 rows should come back; c=1 is excluded.
    res = cursor.execute("SELECT * FROM test WHERE key=0 AND c IN (0, 2)")
    assert rows_to_list(res) == [[0, 0, 0], [0, 2, 2]], res
def large_clustering_in_test(self):
    """A prepared statement with a 10,000-element clustering IN list executes
    and matches the single existing row."""
    # Test for CASSANDRA-8410
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            c int,
            v int,
            PRIMARY KEY (k, c)
        )
    """)
    cursor.execute("INSERT INTO test (k, c, v) VALUES (0, 0, 0)")
    p = cursor.prepare("SELECT * FROM test WHERE k=? AND c IN ?")
    in_values = list(range(10000))
    # Only c=0 exists, so exactly one row should match the huge IN list.
    rows = cursor.execute(p, [0, in_values])
    self.assertEqual(1, len(rows))
    self.assertEqual((0, 0, 0), rows[0])
@since('1.2.1')
def timeuuid_test(self):
    """timeuuid clustering column behaviour: literal validation, now(),
    range slicing, dateOf()/unixTimestampOf() and min/maxTimeuuid()."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            t timeuuid,
            PRIMARY KEY (k, t)
        )
    """)
    # A bare date literal is not valid timeuuid syntax.
    assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, 2012-11-07 18:18:22-0800)", expected=SyntaxException)
    # Sleep between inserts so the four now() values are strictly ordered.
    for i in range(4):
        cursor.execute("INSERT INTO test (k, t) VALUES (0, now())")
        time.sleep(1)
    res = cursor.execute("SELECT * FROM test")
    assert len(res) == 4, res
    dates = [d[1] for d in res]
    res = cursor.execute("SELECT * FROM test WHERE k = 0 AND t >= %s" % dates[0])
    assert len(res) == 4, res
    res = cursor.execute("SELECT * FROM test WHERE k = 0 AND t < %s" % dates[0])
    assert len(res) == 0, res
    res = cursor.execute("SELECT * FROM test WHERE k = 0 AND t > %s AND t <= %s" % (dates[0], dates[2]))
    assert len(res) == 2, res
    res = cursor.execute("SELECT * FROM test WHERE k = 0 AND t = %s" % dates[0])
    assert len(res) == 1, res
    # dateOf() only applies to timeuuid values, not the int key.
    assert_invalid(cursor, "SELECT dateOf(k) FROM test WHERE k = 0 AND t = %s" % dates[0])
    cursor.execute("SELECT dateOf(t), unixTimestampOf(t) FROM test WHERE k = 0 AND t = %s" % dates[0])
    cursor.execute("SELECT t FROM test WHERE k = 0 AND t > maxTimeuuid(1234567) AND t < minTimeuuid('2012-11-07 18:18:22-0800')")
    # not sure what to check exactly so just checking the query returns
def float_with_exponent_test(self):
    """Exponent-notation float/double literals are accepted by INSERT."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            d double,
            f float
        )
    """)
    # Cover E+, a trailing-dot mantissa with E, negative exponents, and
    # plain integers coerced to floating types.
    inserts = (
        "INSERT INTO test(k, d, f) VALUES (0, 3E+10, 3.4E3)",
        "INSERT INTO test(k, d, f) VALUES (1, 3.E10, -23.44E-3)",
        "INSERT INTO test(k, d, f) VALUES (2, 3, -2)",
    )
    for stmt in inserts:
        session.execute(stmt)
def compact_metadata_test(self):
    """ Test regression from #5189: SELECT * on a COMPACT STORAGE table
    must expose the value column alongside the key. """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE bar (
            id int primary key,
            i int
        ) WITH COMPACT STORAGE;
    """)
    cursor.execute("INSERT INTO bar (id, i) VALUES (1, 2);")
    res = cursor.execute("SELECT * FROM bar")
    assert rows_to_list(res) == [[1, 2]], res
@since('2.0')
def clustering_indexing_test(self):
    """Secondary indexes on clustering (time) and partition-key component
    (id2) columns of a composite-partition table are queryable, including
    after a null update (CASSANDRA-8206)."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE posts (
            id1 int,
            id2 int,
            author text,
            time bigint,
            v1 text,
            v2 text,
            PRIMARY KEY ((id1, id2), author, time)
        )
    """)
    cursor.execute("CREATE INDEX ON posts(time)")
    cursor.execute("CREATE INDEX ON posts(id2)")
    cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 0, 'A', 'A')")
    cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 1, 'B', 'B')")
    cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'bob', 2, 'C', 'C')")
    cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'tom', 0, 'D', 'D')")
    cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'tom', 1, 'E', 'E')")
    res = cursor.execute("SELECT v1 FROM posts WHERE time = 1")
    assert rows_to_list(res) == [['B'], ['E']], res
    res = cursor.execute("SELECT v1 FROM posts WHERE id2 = 1")
    assert rows_to_list(res) == [['C'], ['E']], res
    res = cursor.execute("SELECT v1 FROM posts WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 0")
    assert rows_to_list(res) == [['A']], res
    # Test for CASSANDRA-8206: nulling v2 must not hide the row from index queries.
    cursor.execute("UPDATE posts SET v2 = null WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 1")
    res = cursor.execute("SELECT v1 FROM posts WHERE id2 = 0")
    assert rows_to_list(res) == [['A'], ['B'], ['D']], res
    res = cursor.execute("SELECT v1 FROM posts WHERE time = 1")
    assert rows_to_list(res) == [['B'], ['E']], res
@since('2.0')
def invalid_clustering_indexing_test(self):
    """Secondary-index creation is rejected on COMPACT STORAGE key/clustering
    columns and on static columns."""
    cursor = self.prepare()
    cursor.execute("CREATE TABLE test1 (a int, b int, c int, d int, PRIMARY KEY ((a, b))) WITH COMPACT STORAGE")
    assert_invalid(cursor, "CREATE INDEX ON test1(a)")
    assert_invalid(cursor, "CREATE INDEX ON test1(b)")
    cursor.execute("CREATE TABLE test2 (a int, b int, c int, PRIMARY KEY (a, b)) WITH COMPACT STORAGE")
    assert_invalid(cursor, "CREATE INDEX ON test2(a)")
    assert_invalid(cursor, "CREATE INDEX ON test2(b)")
    assert_invalid(cursor, "CREATE INDEX ON test2(c)")
    cursor.execute("CREATE TABLE test3 (a int, b int, c int static , PRIMARY KEY (a, b))")
    # Static columns cannot be indexed either.
    assert_invalid(cursor, "CREATE INDEX ON test3(c)")
@since('2.0')
def edge_2i_on_complex_pk_test(self):
    """Secondary indexes on individual partition-key and clustering components
    of a composite primary key, alone and combined with other restrictions."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE indexed (
            pk0 int,
            pk1 int,
            ck0 int,
            ck1 int,
            ck2 int,
            value int,
            PRIMARY KEY ((pk0, pk1), ck0, ck1, ck2)
        )
    """)
    cursor.execute("CREATE INDEX ON indexed(pk0)")
    cursor.execute("CREATE INDEX ON indexed(ck0)")
    cursor.execute("CREATE INDEX ON indexed(ck1)")
    cursor.execute("CREATE INDEX ON indexed(ck2)")
    # Each row rotates the values so every index query below has a unique hit.
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (0, 1, 2, 3, 4, 5)")
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (1, 2, 3, 4, 5, 0)")
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (2, 3, 4, 5, 0, 1)")
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (3, 4, 5, 0, 1, 2)")
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (4, 5, 0, 1, 2, 3)")
    cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (5, 0, 1, 2, 3, 4)")
    res = cursor.execute("SELECT value FROM indexed WHERE pk0 = 2")
    self.assertEqual([[1]], rows_to_list(res))
    res = cursor.execute("SELECT value FROM indexed WHERE ck0 = 0")
    self.assertEqual([[3]], rows_to_list(res))
    res = cursor.execute("SELECT value FROM indexed WHERE pk0 = 3 AND pk1 = 4 AND ck1 = 0")
    self.assertEqual([[2]], rows_to_list(res))
    res = cursor.execute("SELECT value FROM indexed WHERE pk0 = 5 AND pk1 = 0 AND ck0 = 1 AND ck2 = 3 ALLOW FILTERING")
    self.assertEqual([[4]], rows_to_list(res))
def bug_5240_test(self):
    """Regression test for #5240: a secondary-index query combined with the
    full composite partition key on a DESC-clustered table."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test(
            interval text,
            seq int,
            id int,
            severity int,
            PRIMARY KEY ((interval, seq), id)
        ) WITH CLUSTERING ORDER BY (id DESC);
    """)
    cursor.execute("CREATE INDEX ON test(severity);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 1, 1);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 2, 1);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 3, 2);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 4, 3);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 1, 3);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 2, 3);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 3, 1);")
    cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 4, 2);")
    # Only the ('t', 1) partition's severity-3 row should match.
    res = cursor.execute("select * from test where severity = 3 and interval = 't' and seq =1;")
    assert rows_to_list(res) == [['t', 1, 4, 3]], res
def ticket_5230_test(self):
    """Regression test for #5230: IN on a text clustering column returns
    exactly the requested rows."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE foo (
            key text,
            c text,
            v text,
            PRIMARY KEY (key, c)
        )
    """)
    cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '1', '1')")
    cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '2', '2')")
    cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '3', '3')")
    res = cursor.execute("SELECT c FROM foo WHERE key = 'foo' AND c IN ('1', '2');")
    assert rows_to_list(res) == [['1'], ['2']], res
def conversion_functions_test(self):
    """Blob conversion functions: blobAsVarint/bigintAsBlob round-trip an
    integer, and textAsBlob/blobAsText round-trip a string."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            i varint,
            b blob
        )
    """)
    cursor.execute("INSERT INTO test(k, i, b) VALUES (0, blobAsVarint(bigintAsBlob(3)), textAsBlob('foobar'))")
    res = cursor.execute("SELECT i, blobAsText(b) FROM test WHERE k = 0")
    assert rows_to_list(res) == [[3, 'foobar']], res
def alter_bug_test(self):
    """ Test for bug of 5232: ALTER TABLE ADD for collection columns must
    leave existing rows readable, with the new columns as null. """
    cursor = self.prepare()
    cursor.execute("CREATE TABLE t1 (id int PRIMARY KEY, t text);")
    cursor.execute("UPDATE t1 SET t = '111' WHERE id = 1;")
    cursor.execute("ALTER TABLE t1 ADD l list<text>;")
    # Brief pause for the schema change to settle before querying.
    time.sleep(.5)
    res = cursor.execute("SELECT * FROM t1;")
    assert rows_to_list(res) == [[1, None, '111']], res
    cursor.execute("ALTER TABLE t1 ADD m map<int, text>;")
    time.sleep(.5)
    res = cursor.execute("SELECT * FROM t1;")
    assert rows_to_list(res) == [[1, None, None, '111']], res
def bug_5376(self):
    """Regression test for #5376: IN on a clustering column is rejected when
    the selection includes a collection column.

    NOTE(review): method name lacks the usual ``_test`` suffix, so the test
    collector may skip it — confirm whether that is intentional.
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            key text,
            c bigint,
            v text,
            x set<text>,
            PRIMARY KEY (key, c)
        );
    """)
    assert_invalid(cursor, "select * from test where key = 'foo' and c in (1,3,4);")
def function_and_reverse_type_test(self):
    """ Test for #5386: now() must work for a timeuuid clustering column
    stored with a reversed (DESC) comparator. """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            c timeuuid,
            v int,
            PRIMARY KEY (k, c)
        ) WITH CLUSTERING ORDER BY (c DESC)
    """)
    # Just needs to execute without error.
    cursor.execute("INSERT INTO test (k, c, v) VALUES (0, now(), 0);")
def bug_5404(self):
    """Regression check: token() over a non-key expression must be rejected
    cleanly instead of triggering a server-side NPE (#5404).

    NOTE(review): method name lacks the usual ``_test`` suffix, so the test
    collector may skip it — confirm whether that is intentional.
    """
    session = self.prepare()
    session.execute("CREATE TABLE test (key text PRIMARY KEY)")
    # We just want to make sure this doesn't NPE server side
    assert_invalid(session, "select * from test where token(key) > token(int(3030343330393233)) limit 1;")
def empty_blob_test(self):
    """A zero-length blob literal (0x) round-trips as an empty value."""
    session = self.prepare()
    for stmt in ("CREATE TABLE test (k int PRIMARY KEY, b blob)",
                 "INSERT INTO test (k, b) VALUES (0, 0x)"):
        session.execute(stmt)
    rows = session.execute("SELECT * FROM test")
    assert rows_to_list(rows) == [[0, '']], rows
@since('2.0')
def rename_test(self):
    """ALTER TABLE ... RENAME of the default columnN names of a column family
    created through the legacy Thrift CLI."""
    cursor = self.prepare()
    # The goal is to test renaming from an old cli value
    cli = self.cluster.nodelist()[0].cli()
    cli.do("use ks")
    cli.do("create column family test with comparator='CompositeType(Int32Type, Int32Type, Int32Type)' "
            + "and key_validation_class=UTF8Type and default_validation_class=UTF8Type")
    cli.do("set test['foo']['4:3:2'] = 'bar'")
    assert not cli.has_errors(), cli.errors()
    # Give the schema created via the CLI time to propagate.
    time.sleep(1)
    cursor.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
    assert_one(cursor, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
def clustering_order_and_functions_test(self):
    """dateOf() works on a timeuuid clustering column stored in DESC order."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int,
            t timeuuid,
            PRIMARY KEY (k, t)
        ) WITH CLUSTERING ORDER BY (t DESC)
    """)
    for key in range(0, 5):
        session.execute("INSERT INTO test (k, t) VALUES (%d, now())" % key)
    # Values are time-dependent, so just verify the select executes.
    session.execute("SELECT dateOf(t) FROM test")
@since('2.0')
def conditional_update_test(self):
    """Lightweight-transaction UPDATE/INSERT/DELETE: IF <col> = <val>,
    IF EXISTS / IF NOT EXISTS, multi-column conditions, null conditions,
    and the [applied] result rows they return.

    The statement order matters: later assertions depend on the row state
    left by the earlier conditional operations.
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v1 int,
            v2 text,
            v3 int
        )
    """)
    # Shouldn't apply
    assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False])
    assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
    # Should apply
    assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo') IF NOT EXISTS", [True])
    # Shouldn't apply
    assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 5, 'bar') IF NOT EXISTS", [False, 0, 2, 'foo', None])
    assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None])
    # Should not apply
    assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False, 2])
    assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None])
    # Should apply (note: we want v2 before v1 in the statement order to exercise #5786)
    assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF v1 = 2", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF EXISTS", [True])
    assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None])
    # Shouldn't apply, only one condition is ok
    assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'foo'", [False, 3, 'bar'])
    assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None])
    # Should apply
    assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'bar'", [True])
    assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None])
    # Shouldn't apply
    assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 3", [False, 5])
    assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None])
    # Shouldn't apply
    assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = null", [False, 5])
    assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None])
    # Should apply
    assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 5", [True])
    assert_one(cursor, "SELECT * FROM test", [0, 5, None, None])
    # Shouln't apply
    assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = 4", [False, None])
    # Should apply
    assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = null", [True])
    assert_one(cursor, "SELECT * FROM test", [0, None, None, None])
    # Should apply
    assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 = null", [True])
    assert_none(cursor, "SELECT * FROM test")
    # Shouldn't apply
    assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
    if self.cluster.version() > "2.1.1":
        # Should apply
        assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 IN (null)", [True])
@since('2.1.1')
def non_eq_conditional_update_test(self):
    """Lightweight-transaction conditions beyond equality: <, <=, >, >=, !=
    and IN (including an empty IN list, which never applies)."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v1 int,
            v2 text,
            v3 int
        )
    """)
    # non-EQ conditions
    cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo')")
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 < 3", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 <= 3", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 > 1", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 >= 1", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 1", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 2", [False, 2])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (0, 1, 2)", [True])
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (142, 276)", [False, 2])
    # An empty IN list can never match.
    assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN ()", [False, 2])
@since('2.0.7')
def conditional_delete_test(self):
    """Conditional DELETE (IF EXISTS): whole rows, single columns, rows
    expired via TTL, and the restrictions that apply with static columns
    and range deletions (CASSANDRA-6430)."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            v1 int,
        )
    """)
    assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
    cursor.execute("INSERT INTO test (k, v1) VALUES (1, 2)")
    assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [True])
    assert_none(cursor, "SELECT * FROM test WHERE k=1")
    assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
    # A row whose only column expired via TTL no longer "exists".
    cursor.execute("UPDATE test USING TTL 1 SET v1=2 WHERE k=1")
    time.sleep(1.5)
    assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
    assert_none(cursor, "SELECT * FROM test WHERE k=1")
    cursor.execute("INSERT INTO test (k, v1) VALUES (2, 2) USING TTL 1")
    time.sleep(1.5)
    assert_one(cursor, "DELETE FROM test WHERE k=2 IF EXISTS", [False])
    assert_none(cursor, "SELECT * FROM test WHERE k=2")
    # Deleting a single column leaves the row itself in place.
    cursor.execute("INSERT INTO test (k, v1) VALUES (3, 2)")
    assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
    assert_one(cursor, "SELECT * FROM test WHERE k=3", [3, None])
    assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
    assert_one(cursor, "DELETE FROM test WHERE k=3 IF EXISTS", [True])
    # static columns
    cursor.execute("""
        CREATE TABLE test2 (
            k text,
            s text static,
            i int,
            v text,
            PRIMARY KEY (k, i)
        )""")
    cursor.execute("INSERT INTO test2 (k, s, i, v) VALUES ('k', 's', 0, 'v')")
    assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
    assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
    assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
    assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
    # CASSANDRA-6430: conditional range deletions and deletions without the
    # full primary key are invalid on these versions.
    v = self.cluster.version()
    if v >= "2.1.1" or v < "2.1" and v >= "2.0.11":
        assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF EXISTS")
        assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF v = 'foo'")
        assert_invalid(cursor, "DELETE FROM test2 WHERE i = 0 IF EXISTS")
        assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF EXISTS")
        assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF v = 'foo'")
@freshCluster()
def range_key_ordered_test(self):
    """With an ordered partitioner, unrestricted SELECT returns keys in token
    order, while a non-token range on the partition key is still invalid."""
    cursor = self.prepare(ordered=True)
    cursor.execute("CREATE TABLE test ( k int PRIMARY KEY)")
    cursor.execute("INSERT INTO test(k) VALUES (-1)")
    cursor.execute("INSERT INTO test(k) VALUES ( 0)")
    cursor.execute("INSERT INTO test(k) VALUES ( 1)")
    assert_all(cursor, "SELECT * FROM test", [[0], [1], [-1]])
    assert_invalid(cursor, "SELECT * FROM test WHERE k >= -1 AND k < 1;")
@since('2.0')
def select_with_alias_test(self):
    """Column aliases (AS) on count(*), regular columns, writetime, ttl and
    function calls; aliases are rejected in WHERE and ORDER BY clauses."""
    cursor = self.prepare()
    cursor.execute('CREATE TABLE users (id int PRIMARY KEY, name text)')
    for id in range(0, 5):
        cursor.execute("INSERT INTO users (id, name) VALUES (%d, 'name%d') USING TTL 10 AND TIMESTAMP 0" % (id, id))
    # test aliasing count(*)
    res = cursor.execute('SELECT count(*) AS user_count FROM users')
    self.assertEqual('user_count', res[0]._fields[0])
    self.assertEqual(5, res[0].user_count)
    # test aliasing regular value
    res = cursor.execute('SELECT name AS user_name FROM users WHERE id = 0')
    self.assertEqual('user_name', res[0]._fields[0])
    self.assertEqual('name0', res[0].user_name)
    # test aliasing writetime
    res = cursor.execute('SELECT writeTime(name) AS name_writetime FROM users WHERE id = 0')
    self.assertEqual('name_writetime', res[0]._fields[0])
    self.assertEqual(0, res[0].name_writetime)
    # test aliasing ttl (allow 9 or 10 since some time may have elapsed)
    res = cursor.execute('SELECT ttl(name) AS name_ttl FROM users WHERE id = 0')
    self.assertEqual('name_ttl', res[0]._fields[0])
    assert res[0].name_ttl in (9, 10)
    # test aliasing a regular function
    res = cursor.execute('SELECT intAsBlob(id) AS id_blob FROM users WHERE id = 0')
    self.assertEqual('id_blob', res[0]._fields[0])
    self.assertEqual('\x00\x00\x00\x00', res[0].id_blob)
    # test that select throws a meaningful exception for aliases in where clause
    assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE user_id = 0', matching="Aliases aren't allowed in the where clause")
    # test that select throws a meaningful exception for aliases in order by clause
    assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE id IN (0) ORDER BY user_name', matching="Aliases are not allowed in order by clause")
def nonpure_function_collection_test(self):
    """Regression check for #5795: the non-pure function now() is allowed
    inside a collection literal."""
    session = self.prepare()
    session.execute("CREATE TABLE test (k int PRIMARY KEY, v list<timeuuid>)")
    # we just want to make sure this doesn't throw
    session.execute("INSERT INTO test(k, v) VALUES (0, [now()])")
def empty_in_test(self):
    """An empty IN () list in SELECT, DELETE and UPDATE matches nothing and
    changes nothing, for both regular and COMPACT STORAGE tables."""
    cursor = self.prepare()
    cursor.execute("CREATE TABLE test (k1 int, k2 int, v int, PRIMARY KEY (k1, k2))")
    def fill(table):
        # Populate the 2x2 grid of (k1, k2) rows.
        for i in range(0, 2):
            for j in range(0, 2):
                cursor.execute("INSERT INTO %s (k1, k2, v) VALUES (%d, %d, %d)" % (table, i, j, i + j))
    def assert_nothing_changed(table):
        res = cursor.execute("SELECT * FROM %s" % table)  # make sure nothing got removed
        self.assertEqual([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 2]], rows_to_list(sorted(res)))
    # Inserts a few rows to make sure we don't actually query something
    fill("test")
    # Test empty IN () in SELECT
    assert_none(cursor, "SELECT v FROM test WHERE k1 IN ()")
    assert_none(cursor, "SELECT v FROM test WHERE k1 = 0 AND k2 IN ()")
    # Test empty IN () in DELETE
    cursor.execute("DELETE FROM test WHERE k1 IN ()")
    assert_nothing_changed("test")
    # Test empty IN () in UPDATE
    cursor.execute("UPDATE test SET v = 3 WHERE k1 IN () AND k2 = 2")
    assert_nothing_changed("test")
    # Same test, but for compact
    cursor.execute("CREATE TABLE test_compact (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE")
    fill("test_compact")
    assert_none(cursor, "SELECT v FROM test_compact WHERE k1 IN ()")
    assert_none(cursor, "SELECT v FROM test_compact WHERE k1 = 0 AND k2 IN ()")
    # Test empty IN () in DELETE
    cursor.execute("DELETE FROM test_compact WHERE k1 IN ()")
    assert_nothing_changed("test_compact")
    # Test empty IN () in UPDATE
    cursor.execute("UPDATE test_compact SET v = 3 WHERE k1 IN () AND k2 = 2")
    assert_nothing_changed("test_compact")
def collection_flush_test(self):
    """Regression check for 5805: a full set overwrite spanning two sstable
    flushes keeps only the latest set value."""
    session = self.prepare()
    session.execute("CREATE TABLE test (k int PRIMARY KEY, s set<int>)")
    session.execute("INSERT INTO test(k, s) VALUES (1, {1})")
    self.cluster.flush()
    # Overwrite the entire set after the first flush, then flush again.
    session.execute("INSERT INTO test(k, s) VALUES (1, {2})")
    self.cluster.flush()
    assert_one(session, "SELECT * FROM test", [1, set([2])])
@since('2.0.1')
def select_distinct_test(self):
    """SELECT DISTINCT on partition keys with LIMIT, for regular CQL3,
    COMPACT STORAGE and wide-row Thrift tables, plus selection validation."""
    cursor = self.prepare()
    # Test a regular (CQL3) table.
    cursor.execute('CREATE TABLE regular (pk0 int, pk1 int, ck0 int, val int, PRIMARY KEY((pk0, pk1), ck0))')
    for i in xrange(0, 3):
        cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 0, 0)' % (i, i))
        cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 1, 1)' % (i, i))
    res = cursor.execute('SELECT DISTINCT pk0, pk1 FROM regular LIMIT 1')
    self.assertEqual([[0, 0]], rows_to_list(res))
    res = cursor.execute('SELECT DISTINCT pk0, pk1 FROM regular LIMIT 3')
    self.assertEqual([[0, 0], [1, 1], [2, 2]], rows_to_list(sorted(res)))
    # Test a 'compact storage' table.
    cursor.execute('CREATE TABLE compact (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE')
    for i in xrange(0, 3):
        cursor.execute('INSERT INTO compact (pk0, pk1, val) VALUES (%d, %d, %d)' % (i, i, i))
    res = cursor.execute('SELECT DISTINCT pk0, pk1 FROM compact LIMIT 1')
    self.assertEqual([[0, 0]], rows_to_list(res))
    res = cursor.execute('SELECT DISTINCT pk0, pk1 FROM compact LIMIT 3')
    self.assertEqual([[0, 0], [1, 1], [2, 2]], rows_to_list(sorted(res)))
    # Test a 'wide row' thrift table.
    cursor.execute('CREATE TABLE wide (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE')
    for i in xrange(0, 3):
        cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name0', 0)" % i)
        cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name1', 1)" % i)
    res = cursor.execute('SELECT DISTINCT pk FROM wide LIMIT 1')
    self.assertEqual([[1]], rows_to_list(res))
    res = cursor.execute('SELECT DISTINCT pk FROM wide LIMIT 3')
    self.assertEqual([[0], [1], [2]], rows_to_list(sorted(res)))
    # Test selection validation.
    assert_invalid(cursor, 'SELECT DISTINCT pk0 FROM regular', matching="queries must request all the partition key columns")
    assert_invalid(cursor, 'SELECT DISTINCT pk0, pk1, ck0 FROM regular', matching="queries must only request partition key columns")
def select_distinct_with_deletions_test(self):
    """SELECT DISTINCT skips deleted partitions, with and without LIMIT
    and across driver-level paging (fetch size smaller than result)."""
    cursor = self.prepare()
    cursor.execute('CREATE TABLE t1 (k int PRIMARY KEY, c int, v int)')
    for i in range(10):
        cursor.execute('INSERT INTO t1 (k, c, v) VALUES (%d, %d, %d)' % (i, i, i))
    rows = cursor.execute('SELECT DISTINCT k FROM t1')
    self.assertEqual(10, len(rows))
    # Delete one partition picked from the result set.
    key_to_delete = rows[3].k
    cursor.execute('DELETE FROM t1 WHERE k=%d' % (key_to_delete,))
    rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
    self.assertEqual(9, len(rows))
    rows = list(cursor.execute('SELECT DISTINCT k FROM t1 LIMIT 5'))
    self.assertEqual(5, len(rows))
    # Force paging (fetch size 5 < 9 remaining partitions).
    cursor.default_fetch_size = 5
    rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
    self.assertEqual(9, len(rows))
def function_with_null_test(self):
    """dateOf() applied to a null timeuuid yields null rather than an error."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            t timeuuid,
        )
    """)
    # Insert the key only, leaving t unset (null).
    session.execute("INSERT INTO test(k) VALUES (0)")
    assert_one(session, "SELECT dateOf(t) FROM test WHERE k=0", [None])
@freshCluster()
def cas_simple_test(self):
    """Basic compare-and-set on a 3-node RF=3 cluster at QUORUM: each token
    can be consumed exactly once; the second CAS reports the current value."""
    cursor = self.prepare(nodes=3, rf=3)
    cursor.execute("CREATE TABLE tkns (tkn int, consumed boolean, PRIMARY KEY (tkn));")
    for i in range(1, 10):
        query = SimpleStatement("INSERT INTO tkns (tkn, consumed) VALUES (%i,FALSE);" % i, consistency_level=ConsistencyLevel.QUORUM)
        cursor.execute(query)
        # First CAS applies; the retry must fail and return the current value.
        assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = %i IF consumed = FALSE;" % i, [True], cl=ConsistencyLevel.QUORUM)
        assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = %i IF consumed = FALSE;" % i, [False, True], cl=ConsistencyLevel.QUORUM)
def bug_6050_test(self):
    """Regression test for #6050: an IN restriction on a non-indexed column
    combined with a 2i lookup is rejected."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            a int,
            b int
        )
    """)
    cursor.execute("CREATE INDEX ON test(a)")
    # b is not indexed, so IN on b alongside the index query is invalid.
    assert_invalid(cursor, "SELECT * FROM test WHERE a = 3 AND b IN (1, 3)")
@since('2.0')
def bug_6069_test(self):
    """Regression test for #6069: INSERT ... IF NOT EXISTS with a set literal
    applies and stores the full collection."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int PRIMARY KEY,
            s set<int>
        )
    """)
    assert_one(cursor, "INSERT INTO test(k, s) VALUES (0, {1, 2, 3}) IF NOT EXISTS", [True])
    assert_one(cursor, "SELECT * FROM test", [0, {1, 2, 3}])
def bug_6115_test(self):
    """Regression check for #6115: within one batch, a delete followed by a
    re-insert into the same partition keeps the inserted row."""
    session = self.prepare()
    session.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
    session.execute("INSERT INTO test (k, v) VALUES (0, 1)")
    # Delete and insert at the same batch timestamp; the insert must win.
    session.execute("BEGIN BATCH DELETE FROM test WHERE k=0 AND v=1; INSERT INTO test (k, v) VALUES (0, 2); APPLY BATCH")
    assert_one(session, "SELECT * FROM test", [0, 2])
def secondary_index_counters(self):
    """Creating a secondary index on a counter column is rejected.

    NOTE(review): method name lacks the usual ``_test`` suffix, so the test
    collector may skip it — confirm whether that is intentional.
    """
    session = self.prepare()
    session.execute("CREATE TABLE test (k int PRIMARY KEY, c counter)")
    assert_invalid(session, "CREATE INDEX ON test(c)")
def column_name_validation_test(self):
    """Value validation on insert: empty text key, an out-of-range int
    clustering value, and a non-version-1 uuid for a timeuuid column."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k text,
            c int,
            v timeuuid,
            PRIMARY KEY (k, c)
        )
    """)
    # Empty text is not a valid partition key value here.
    assert_invalid(cursor, "INSERT INTO test(k, c) VALUES ('', 0)")
    # Insert a value that don't fit 'int'
    assert_invalid(cursor, "INSERT INTO test(k, c) VALUES (0, 10000000000)")
    # Insert a non-version 1 uuid
    assert_invalid(cursor, "INSERT INTO test(k, c, v) VALUES (0, 0, 550e8400-e29b-41d4-a716-446655440000)")
@since('2.1')
def user_types_test(self):
    """User-defined types: creation, use as a frozen column and inside a map,
    field selection (name.firstname) and collection-append updates."""
    cursor = self.prepare()
    userID_1 = uuid4()
    stmt = """
        CREATE TYPE address (
            street text,
            city text,
            zip_code int,
            phones set<text>
        )
    """
    cursor.execute(stmt)
    stmt = """
        CREATE TYPE fullname (
            firstname text,
            lastname text
        )
    """
    cursor.execute(stmt)
    stmt = """
        CREATE TABLE users (
            id uuid PRIMARY KEY,
            name frozen<fullname>,
            addresses map<text, frozen<address>>
        )
    """
    cursor.execute(stmt)
    # Doubled braces below escape str.format's placeholder syntax.
    stmt = """
        INSERT INTO users (id, name)
        VALUES ({id}, {{ firstname: 'Paul', lastname: 'smith'}});
    """.format(id=userID_1)
    cursor.execute(stmt)
    stmt = """
        SELECT name.firstname FROM users WHERE id = {id}
    """.format(id=userID_1)
    res = cursor.execute(stmt)
    self.assertEqual(['Paul'], list(res[0]))
    stmt = """
        UPDATE users
        SET addresses = addresses + {{ 'home': {{ street: '...', city: 'SF', zip_code: 94102, phones: {{}} }} }}
        WHERE id={id};
    """.format(id=userID_1)
    cursor.execute(stmt)
    stmt = """
        SELECT addresses FROM users WHERE id = {id}
    """.format(id=userID_1)
    res = cursor.execute(stmt)
    ## TODO: deserialize the value here and check it's right.
@since('2.1')
def more_user_types_test(self):
    """ user type test that does a little more nesting: a UDT containing a
    set of another frozen UDT, inserted and read back without error. """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TYPE type1 (
            s set<text>,
            m map<text, text>,
            l list<text>
        )
    """)
    cursor.execute("""
        CREATE TYPE type2 (
            s set<frozen<type1>>,
        )
    """)
    cursor.execute("""
        CREATE TABLE test (id int PRIMARY KEY, val frozen<type2>)
    """)
    cursor.execute("INSERT INTO test(id, val) VALUES (0, { s : {{ s : {'foo', 'bar'}, m : { 'foo' : 'bar' }, l : ['foo', 'bar']} }})")
    # TODO: check result once we have an easy way to do it. For now we just check it doesn't crash
    cursor.execute("SELECT * FROM test")
@since('1.2')
def bug_6327_test(self):
    """Regression check for #6327: an IN restriction on the clustering column
    still finds a row after it has been flushed to an sstable."""
    session = self.prepare()
    session.execute("""
        CREATE TABLE test (
            k int,
            v int,
            PRIMARY KEY (k, v)
        )
    """)
    session.execute("INSERT INTO test (k, v) VALUES (0, 0)")
    # Force the row out of the memtable before querying.
    self.cluster.flush()
    assert_one(session, "SELECT v FROM test WHERE k=0 AND v IN (1, 0)", [0])
@since('1.2')
def large_count_test(self):
    """COUNT(*) across driver paging boundaries: just below, at, just above
    the 10K fetch size, and well beyond it."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            v int,
            PRIMARY KEY (k)
        )
    """)
    cursor.default_fetch_size = 10000
    # We know we page at 10K, so test counting just before, at 10K, just after and
    # a bit after that.
    for k in range(1, 10000):
        cursor.execute("INSERT INTO test(k) VALUES (%d)" % k)
    assert_one(cursor, "SELECT COUNT(*) FROM test", [9999])
    cursor.execute("INSERT INTO test(k) VALUES (%d)" % 10000)
    assert_one(cursor, "SELECT COUNT(*) FROM test", [10000])
    cursor.execute("INSERT INTO test(k) VALUES (%d)" % 10001)
    assert_one(cursor, "SELECT COUNT(*) FROM test", [10001])
    for k in range(10002, 15001):
        cursor.execute("INSERT INTO test(k) VALUES (%d)" % k)
    assert_one(cursor, "SELECT COUNT(*) FROM test", [15000])
@since('2.1')
def collection_indexing_test(self):
    """Secondary indexes on collection values: CONTAINS queries over lists,
    sets and map values, with and without a partition-key restriction."""
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            v int,
            l list<int>,
            s set<text>,
            m map<text, int>,
            PRIMARY KEY (k, v)
        )
    """)
    cursor.execute("CREATE INDEX ON test(l)")
    cursor.execute("CREATE INDEX ON test(s)")
    cursor.execute("CREATE INDEX ON test(m)")
    cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 0, [1, 2],    {'a'},      {'a' : 1})")
    cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 1, [3, 4],    {'b', 'c'}, {'a' : 1, 'b' : 2})")
    cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 2, [1],       {'a', 'c'}, {'c' : 3})")
    cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 0, [1, 2, 4], {},         {'b' : 1})")
    cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 1, [4, 5],    {'d'},      {'a' : 1, 'b' : 3})")
    # lists
    assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 1", [[1, 0], [0, 0], [0, 2]])
    assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND l CONTAINS 1", [[0, 0], [0, 2]])
    assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 2", [[1, 0], [0, 0]])
    assert_none(cursor, "SELECT k, v FROM test WHERE l CONTAINS 6")
    # sets
    assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'a'", [[0, 0], [0, 2]])
    assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND s CONTAINS 'a'", [[0, 0], [0, 2]])
    assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'd'", [[1, 1]])
    assert_none(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'e'")
    # maps (CONTAINS matches on map values)
    assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 1", [[1, 0], [1, 1], [0, 0], [0, 1]])
    assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS 1", [[0, 0], [0, 1]])
    assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 2", [[0, 1]])
    assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS 4")
@since('2.1')
def map_keys_indexing(self):
    """Secondary index on map keys (keys(m)): CONTAINS KEY queries, and the
    rule that a value index cannot coexist with a key index on the same map.

    NOTE(review): method name lacks the usual ``_test`` suffix, so the test
    collector may skip it — confirm whether that is intentional.
    """
    cursor = self.prepare()
    cursor.execute("""
        CREATE TABLE test (
            k int,
            v int,
            m map<text, int>,
            PRIMARY KEY (k, v)
        )
    """)
    cursor.execute("CREATE INDEX ON test(keys(m))")
    cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 0, {'a' : 1})")
    cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 1, {'a' : 1, 'b' : 2})")
    cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 2, {'c' : 3})")
    cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 0, {'b' : 1})")
    cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 1, {'a' : 1, 'b' : 3})")
    # maps
    assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'a'", [[1, 1], [0, 0], [0, 1]])
    assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS KEY 'a'", [[0, 0], [0, 1]])
    assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'c'", [[0, 2]])
    assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'd'")
    # we're not allowed to create a value index if we already have a key one
    assert_invalid(cursor, "CREATE INDEX ON test(m)")
#def nan_infinity_test(self):
# cursor = self.prepare()
# cursor.execute("CREATE TABLE test (f float PRIMARY KEY)")
# cursor.execute("INSERT INTO test(f) VALUES (NaN)")
# cursor.execute("INSERT INTO test(f) VALUES (-NaN)")
# cursor.execute("INSERT INTO test(f) VALUES (Infinity)")
# cursor.execute("INSERT INTO test(f) VALUES (-Infinity)")
# assert_all(cursor, "SELECT * FROM test", [[nan], [inf], [-inf]])
@since('2.0')
def static_columns_test(self):
    """Basic static-column behaviour: a static value is shared by every row
    of its partition, writetime works on it (#7081), IN on the clustering
    key works (#6769), DISTINCT on a static yields one value per partition,
    row deletes don't touch statics, and statics can be ADDed/DROPped.
    """
    cursor = self.prepare()

    cursor.execute("""
        CREATE TABLE test (
            k int,
            p int,
            s int static,
            v int,
            PRIMARY KEY (k, p)
        )
    """)

    # inserting only (k, s) creates a partition with no (clustering) row
    cursor.execute("INSERT INTO test(k, s) VALUES (0, 42)")

    assert_one(cursor, "SELECT * FROM test", [0, None, 42, None])

    # Check that writetime works (#7081) -- we can't predict the exact value easily so
    # we just check that it's non zero
    row = cursor.execute("SELECT s, writetime(s) FROM test WHERE k=0")
    assert list(row[0])[0] == 42 and list(row[0])[1] > 0, row

    cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 12, 0)")
    cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 1, 24, 1)")

    # Check the static columns in indeed "static"
    assert_all(cursor, "SELECT * FROM test", [[0, 0, 24, 0], [0, 1, 24, 1]])

    # Check we do correctly get the static column value with a SELECT *, even
    # if we're only slicing part of the partition
    assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=0", [0, 0, 24, 0])
    assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=1", [0, 1, 24, 1])

    # Test for IN on the clustering key (#6769)
    assert_all(cursor, "SELECT * FROM test WHERE k=0 AND p IN (0, 1)", [[0, 0, 24, 0], [0, 1, 24, 1]])

    # Check things still work if we don't select the static column. We also want
    # this to not request the static columns internally at all, though that part
    # require debugging to assert
    assert_one(cursor, "SELECT p, v FROM test WHERE k=0 AND p=1", [1, 1])

    # Check selecting only a static column with distinct only yield one value
    # (as we only query the static columns)
    assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=0", [24])
    # But without DISTINCT, we still get one result per row
    assert_all(cursor, "SELECT s FROM test WHERE k=0", [[24], [24]])
    # but that querying other columns does correctly yield the full partition
    assert_all(cursor, "SELECT s, v FROM test WHERE k=0", [[24, 0], [24, 1]])
    assert_one(cursor, "SELECT s, v FROM test WHERE k=0 AND p=1", [24, 1])
    assert_one(cursor, "SELECT p, s FROM test WHERE k=0 AND p=1", [1, 24])
    assert_one(cursor, "SELECT k, p, s FROM test WHERE k=0 AND p=1", [0, 1, 24])

    # Check that deleting a row don't implicitely deletes statics
    cursor.execute("DELETE FROM test WHERE k=0 AND p=0")
    assert_all(cursor, "SELECT * FROM test", [[0, 1, 24, 1]])

    # But that explicitely deleting the static column does remove it
    cursor.execute("DELETE s FROM test WHERE k=0")
    assert_all(cursor, "SELECT * FROM test", [[0, 1, None, 1]])

    # Check we can add a static column ...
    cursor.execute("ALTER TABLE test ADD s2 int static")
    assert_all(cursor, "SELECT * FROM test", [[0, 1, None, None, 1]])
    cursor.execute("INSERT INTO TEST (k, p, s2, v) VALUES(0, 2, 42, 2)")
    assert_all(cursor, "SELECT * FROM test", [[0, 1, None, 42, 1], [0, 2, None, 42, 2]])
    # ... and that we can drop it
    cursor.execute("ALTER TABLE test DROP s2")
    assert_all(cursor, "SELECT * FROM test", [[0, 1, None, 1], [0, 2, None, 2]])
@since('2.0')
def static_columns_cas_test(self):
    """Conditional (CAS) operations involving static columns.

    Covers: INSERT ... IF NOT EXISTS scoping (static-only insert vs a full
    CQL3 row), UPDATE conditions on the partition-wide static column, CAS
    batches mixing static and regular conditions, and (>= 2.1) the result
    shape when multiple conditions in one batch target the same row.
    """
    cursor = self.prepare()

    cursor.execute("""
        CREATE TABLE test (
            id int,
            k text,
            version int static,
            v text,
            PRIMARY KEY (id, k)
        )
    """)

    # Test that INSERT IF NOT EXISTS concerns only the static column if no clustering nor regular columns
    # is provided, but concerns the CQL3 row targetted by the clustering columns otherwise
    cursor.execute("INSERT INTO test(id, k, v) VALUES (1, 'foo', 'foo')")
    assert_one(cursor, "INSERT INTO test(id, k, version) VALUES (1, 'foo', 1) IF NOT EXISTS", [False, 1, 'foo', None, 'foo'])
    assert_one(cursor, "INSERT INTO test(id, version) VALUES (1, 1) IF NOT EXISTS", [True])
    assert_one(cursor, "SELECT * FROM test", [1, 'foo', 1, 'foo'])
    cursor.execute("DELETE FROM test WHERE id = 1")

    cursor.execute("INSERT INTO test(id, version) VALUES (0, 0)")

    # condition on the static column guards any row of the partition
    assert_one(cursor, "UPDATE test SET v='foo', version=1 WHERE id=0 AND k='k1' IF version = 0", [True])
    assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']])

    assert_one(cursor, "UPDATE test SET v='bar', version=1 WHERE id=0 AND k='k2' IF version = 0", [False, 1])
    assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']])

    assert_one(cursor, "UPDATE test SET v='bar', version=2 WHERE id=0 AND k='k2' IF version = 1", [True])
    assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 2, 'foo'], [0, 'k2', 2, 'bar']])

    # Testing batches
    assert_one(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
                   UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
                   UPDATE test SET version=3 WHERE id=0 IF version=1;
                 APPLY BATCH
               """, [False, 0, None, 2])

    assert_one(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
                   UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
                   UPDATE test SET version=3 WHERE id=0 IF version=2;
                 APPLY BATCH
               """, [True])
    assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 3, 'foobar'], [0, 'k2', 3, 'barfoo']])

    # a failed batch reports one result row per conditioned row
    assert_all(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET version=4 WHERE id=0 IF version=3;
                   UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foo';
                   UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='bar';
                 APPLY BATCH
               """, [[False, 0, 'k1', 3, 'foobar'], [False, 0, 'k2', 3, 'barfoo']])

    assert_one(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET version=4 WHERE id=0 IF version=3;
                   UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foobar';
                   UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='barfoo';
                 APPLY BATCH
               """, [True])
    assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 4, 'row1'], [0, 'k2', 4, 'row2']])

    # a CAS batch may not span multiple partitions
    assert_invalid(cursor,
                   """
                     BEGIN BATCH
                       UPDATE test SET version=5 WHERE id=0 IF version=4;
                       UPDATE test SET v='row1' WHERE id=0 AND k='k1';
                       UPDATE test SET v='row2' WHERE id=1 AND k='k2';
                     APPLY BATCH
                   """)

    assert_one(cursor,
               """
                 BEGIN BATCH
                   INSERT INTO TEST (id, k, v) VALUES(1, 'k1', 'val1') IF NOT EXISTS;
                   INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
                 APPLY BATCH
               """, [True])
    assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']])

    # one pre-existing row makes the whole batch fail
    assert_one(cursor,
               """
                 BEGIN BATCH
                   INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
                   INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
                 APPLY BATCH
               """, [False, 1, 'k2', None, 'val2'])

    assert_one(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val0';
                   INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
                 APPLY BATCH
               """, [False, 1, 'k2', None, 'val2'])
    assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']])

    assert_one(cursor,
               """
                 BEGIN BATCH
                   UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val2';
                   INSERT INTO TEST (id, k, v, version) VALUES(1, 'k3', 'val3', 1) IF NOT EXISTS;
                 APPLY BATCH
               """, [True])
    assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', 1, 'val1'], [1, 'k2', 1, 'newVal'], [1, 'k3', 1, 'val3']])

    # >= 2.1 allows several conditions on the same row (returns a slimmer
    # failure row); older versions reject the batch outright
    if self.cluster.version() >= '2.1':
        assert_one(cursor,
                   """
                     BEGIN BATCH
                       UPDATE test SET v='newVal1' WHERE id=1 AND k='k2' IF v='val2';
                       UPDATE test SET v='newVal2' WHERE id=1 AND k='k2' IF v='val3';
                     APPLY BATCH
                   """, [False, 1, 'k2', 'newVal'])
    else:
        assert_invalid(cursor,
                       """
                         BEGIN BATCH
                           UPDATE test SET v='newVal1' WHERE id=1 AND k='k2' IF v='val2';
                           UPDATE test SET v='newVal2' WHERE id=1 AND k='k2' IF v='val3';
                         APPLY BATCH
                       """)
@since('2.0')
def static_columns_with_2i_test(self):
    """Secondary index on a regular column of a table that also has a static
    column: the static value is repeated on every matching row, but selecting
    *only* the static column through the index is rejected.
    """
    cursor = self.prepare()

    cursor.execute("""
        CREATE TABLE test (
            k int,
            p int,
            s int static,
            v int,
            PRIMARY KEY (k, p)
        )
    """)

    cursor.execute("CREATE INDEX ON test(v)")
    cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 42, 1)")
    cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 1, 1)")
    cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 2, 2)")

    # every matching row carries the partition's static value
    assert_all(cursor, "SELECT * FROM test WHERE v = 1", [[0, 0, 42, 1], [0, 1, 42, 1]])
    assert_all(cursor, "SELECT p, s FROM test WHERE v = 1", [[0, 42], [1, 42]])
    assert_all(cursor, "SELECT p FROM test WHERE v = 1", [[0], [1]])
    # We don't support that
    assert_invalid(cursor, "SELECT s FROM test WHERE v = 1")
@since('2.0')
def static_columns_with_distinct_test(self):
    """SELECT DISTINCT over static columns.

    Checks: one DISTINCT result per partition, DISTINCT on a static alone
    requires a partition restriction, paging of DISTINCT queries
    (CASSANDRA-8108), and multi-static-column paging at several fetch
    sizes with and without IN / LIMIT (CASSANDRA-8087).

    Fixes over the previous version: removed a leftover debug
    ``print "page size: ..."`` inside the fetch-size loop, and hoisted
    the loop-invariant ``keys`` computation out of that loop.
    """
    cursor = self.prepare()

    cursor.execute("""
        CREATE TABLE test (
            k int,
            p int,
            s int static,
            PRIMARY KEY (k, p)
        )
    """)

    cursor.execute("INSERT INTO test (k, p) VALUES (1, 1)")
    cursor.execute("INSERT INTO test (k, p) VALUES (1, 2)")

    assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None]])
    assert_one(cursor, "SELECT DISTINCT k, s FROM test", [1, None])
    assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
    assert_none(cursor, "SELECT DISTINCT s FROM test WHERE k=2")

    cursor.execute("INSERT INTO test (k, p, s) VALUES (2, 1, 3)")
    cursor.execute("INSERT INTO test (k, p) VALUES (2, 2)")

    assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None], [2, 3], [2, 3]])
    assert_all(cursor, "SELECT DISTINCT k, s FROM test", [[1, None], [2, 3]])
    assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
    assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=2", [3])
    # DISTINCT on a static column alone needs a partition restriction
    assert_invalid(cursor, "SELECT DISTINCT s FROM test")

    # paging to test for CASSANDRA-8108
    cursor.execute("TRUNCATE test")
    for i in range(10):
        for j in range(10):
            cursor.execute("INSERT INTO test (k, p, s) VALUES (%s, %s, %s)", (i, j, i))

    cursor.default_fetch_size = 7
    rows = list(cursor.execute("SELECT DISTINCT k, s FROM test"))
    self.assertEqual(range(10), sorted([r[0] for r in rows]))
    self.assertEqual(range(10), sorted([r[1] for r in rows]))

    keys = ",".join(map(str, range(10)))
    rows = list(cursor.execute("SELECT DISTINCT k, s FROM test WHERE k IN (%s)" % (keys,)))
    self.assertEqual(range(10), [r[0] for r in rows])
    self.assertEqual(range(10), [r[1] for r in rows])

    # additional testing for CASSANRA-8087
    cursor.execute("""
        CREATE TABLE test2 (
            k int,
            c1 int,
            c2 int,
            s1 int static,
            s2 int static,
            PRIMARY KEY (k, c1, c2)
        )
    """)

    for i in range(10):
        for j in range(5):
            for k in range(5):
                cursor.execute("INSERT INTO test2 (k, c1, c2, s1, s2) VALUES (%s, %s, %s, %s, %s)", (i, j, k, i, i + 1))

    # loop-invariant: same IN key list for every fetch size
    keys = ",".join(map(str, range(10)))
    for fetch_size in (None, 2, 5, 7, 10, 24, 25, 26, 1000):
        cursor.default_fetch_size = fetch_size
        rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2"))
        self.assertEqual(range(10), sorted([r[0] for r in rows]))
        self.assertEqual(range(10), sorted([r[1] for r in rows]))

        rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2"))
        self.assertEqual(range(10), sorted([r[0] for r in rows]))
        self.assertEqual(range(1, 11), sorted([r[1] for r in rows]))

        rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 LIMIT 10"))
        self.assertEqual(range(10), sorted([r[0] for r in rows]))
        self.assertEqual(range(10), sorted([r[1] for r in rows]))

        rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s)" % (keys,)))
        self.assertEqual(range(10), [r[0] for r in rows])
        self.assertEqual(range(10), [r[1] for r in rows])

        rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2 WHERE k IN (%s)" % (keys,)))
        self.assertEqual(range(10), [r[0] for r in rows])
        self.assertEqual(range(1, 11), [r[1] for r in rows])

        rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s) LIMIT 10" % (keys,)))
        self.assertEqual(range(10), sorted([r[0] for r in rows]))
        self.assertEqual(range(10), sorted([r[1] for r in rows]))
def select_count_paging_test(self):
    """ Test for the #6579 'select count' paging bug """
    cursor = self.prepare()
    cursor.execute("create table test(field1 text, field2 timeuuid, field3 boolean, primary key(field1, field2));")
    cursor.execute("create index test_index on test(field3);")

    cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")
    cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")

    # pre-3.0 the LIMIT caps the count result itself; from 3.0 it limits
    # the rows being counted, so both rows are seen
    if self.cluster.version() > '3.0':
        assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [2])
    else:
        assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [1])
@since('2.0')
def cas_and_ttl_test(self):
    """A CAS condition must see null once the conditioned value has expired via TTL."""
    cursor = self.prepare()
    cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int, lock boolean)")

    cursor.execute("INSERT INTO test (k, v, lock) VALUES (0, 0, false)")
    cursor.execute("UPDATE test USING TTL 1 SET lock=true WHERE k=0")
    # wait past the 1s TTL so 'lock' has expired before checking the condition
    time.sleep(2)

    assert_one(cursor, "UPDATE test SET v = 1 WHERE k = 0 IF lock = null", [True])
@since('2.0')
def tuple_notation_test(self):
    """ Test the syntax introduced by #4851 """
    session = self.prepare()

    session.execute("CREATE TABLE test (k int, v1 int, v2 int, v3 int, PRIMARY KEY (k, v1, v2, v3))")

    # populate every (v1, v2, v3) combination over {0, 1} in partition 0
    for a in range(0, 2):
        for b in range(0, 2):
            for c in range(0, 2):
                session.execute("INSERT INTO test(k, v1, v2, v3) VALUES (0, {0:d}, {1:d}, {2:d})".format(a, b, c))

    # full partition scan returns the combinations in clustering order
    every_combination = [[a, b, c] for a in (0, 1) for b in (0, 1) for c in (0, 1)]
    assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0", every_combination)

    # tuple relations over (prefixes of) the clustering key
    assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2, v3) >= (1, 0, 1)", [[1, 0, 1], [1, 1, 0], [1, 1, 1]])
    assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) >= (1, 1)", [[1, 1, 0], [1, 1, 1]])
    assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) > (0, 1) AND (v1, v2, v3) <= (1, 1, 0)", [[1, 0, 0], [1, 0, 1], [1, 1, 0]])

    # tuples must name contiguous clustering columns
    assert_invalid(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v3) > (1, 0)")
@since('2.1.1')
def test_v2_protocol_IN_with_tuples(self):
    """ Test for CASSANDRA-8062 """
    cursor = self.prepare()
    # reconnect with protocol v2 to exercise tuple IN bind markers on the older protocol
    cursor = self.cql_connection(self.cluster.nodelist()[0], keyspace='ks', protocol_version=2)
    cursor.execute("CREATE TABLE test (k int, c1 int, c2 text, PRIMARY KEY (k, c1, c2))")
    cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'a')")
    cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'b')")
    cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'c')")

    p = cursor.prepare("SELECT * FROM test WHERE k=? AND (c1, c2) IN ?")
    rows = cursor.execute(p, (0, [(0, 'b'), (0, 'c')]))
    self.assertEqual(2, len(rows))
    self.assertEqual((0, 0, 'b'), rows[0])
    self.assertEqual((0, 0, 'c'), rows[1])
def in_with_desc_order_test(self):
    """IN restriction on the last clustering column combined with ORDER BY ... DESC."""
    session = self.prepare()
    session.execute("CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2))")

    for c2 in (0, 1, 2):
        session.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, %d)" % (c2,))

    # rows selected via IN come back in reversed clustering order
    assert_all(session, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[0, 0, 2], [0, 0, 0]])
@since('2.1')
def in_order_by_without_selecting_test(self):
    """ Test that columns don't need to be selected for ORDER BY when there is a IN (#4911) """
    cursor = self.prepare()
    # disable driver-side paging so result order is exactly what the server returns
    cursor.default_fetch_size = None
    cursor.execute("CREATE TABLE test (k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2))")

    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 0, 0)")
    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 1, 1)")
    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 2, 2)")

    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 0, 3)")
    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 1, 4)")
    cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 2, 5)")

    assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0, 0, 0, 0], [0, 0, 2, 2]])
    assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC, c2 ASC", [[0, 0, 0, 0], [0, 0, 2, 2]])

    # check that we don't need to select the column on which we order
    assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0], [2]])
    assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC", [[0], [2]])
    assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[2], [0]])

    # without ORDER BY, multi-partition IN result order differs across versions
    if self.cluster.version() >= '3.0':
        assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[0], [1], [2], [3], [4], [5]])
    else:
        assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[3], [4], [5], [0], [1], [2]])
    assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0) ORDER BY c1 ASC", [[0], [1], [2], [3], [4], [5]])

    # we should also be able to use functions in the select clause (additional test for CASSANDRA-8286)
    results = cursor.execute("SELECT writetime(v) FROM test WHERE k IN (1, 0) ORDER BY c1 ASC")

    # since we don't know the write times, just assert that the order matches the order we expect
    self.assertEqual(results, list(sorted(results)))
@since('2.0')
def cas_and_compact_test(self):
    """ Test for CAS with compact storage table, and #6813 in particular """
    cursor = self.prepare()

    cursor.execute("""
        CREATE TABLE lock (
            partition text,
            key text,
            owner text,
            PRIMARY KEY (partition, key)
        ) WITH COMPACT STORAGE
    """)

    # an explicit null value: the row exists but owner is unset
    cursor.execute("INSERT INTO lock(partition, key, owner) VALUES ('a', 'b', null)")
    assert_one(cursor, "UPDATE lock SET owner='z' WHERE partition='a' AND key='b' IF owner=null", [True])

    assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='a'", [False, 'z'])
    assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='z'", [True])

    assert_one(cursor, "INSERT INTO lock(partition, key, owner) VALUES ('a', 'c', 'x') IF NOT EXISTS", [True])
@since('2.1.1')
def whole_list_conditional_test(self):
    """CAS conditions comparing a whole list column (=, !=, <, <=, >, >=, IN),
    for both frozen and non-frozen lists (frozen only on >= 2.1.3), plus the
    conditions that must be rejected as invalid.
    """
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tlist")

        cursor.execute("""
            CREATE TABLE tlist (
                k int PRIMARY KEY,
                l %s
            )""" % ("frozen<list<text>>" if frozen else "list<text>",))

        cursor.execute("INSERT INTO tlist(k, l) VALUES (0, ['foo', 'bar', 'foobar'])")

        def check_applies(condition):
            # condition holds -> update applies; value itself is unchanged
            assert_one(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,), [True])
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        check_applies("l = ['foo', 'bar', 'foobar']")
        check_applies("l != ['baz']")
        check_applies("l > ['a']")
        check_applies("l >= ['a']")
        check_applies("l < ['z']")
        check_applies("l <= ['z']")
        check_applies("l IN (null, ['foo', 'bar', 'foobar'], ['a'])")

        # multiple conditions
        check_applies("l > ['aaa', 'bbb'] AND l > ['aaa']")
        check_applies("l != null AND l IN (['foo', 'bar', 'foobar'])")

        def check_does_not_apply(condition):
            # condition fails -> result row echoes the current value
            assert_one(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,),
                       [False, ['foo', 'bar', 'foobar']])
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        # should not apply
        check_does_not_apply("l = ['baz']")
        check_does_not_apply("l != ['foo', 'bar', 'foobar']")
        check_does_not_apply("l > ['z']")
        check_does_not_apply("l >= ['z']")
        check_does_not_apply("l < ['a']")
        check_does_not_apply("l <= ['a']")
        check_does_not_apply("l IN (['a'], null)")
        check_does_not_apply("l IN ()")

        # multiple conditions
        check_does_not_apply("l IN () AND l IN (['foo', 'bar', 'foobar'])")
        check_does_not_apply("l > ['zzz'] AND l < ['zzz']")

        def check_invalid(condition, expected=InvalidRequest):
            assert_invalid(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,), expected=expected)
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        check_invalid("l = [null]")
        check_invalid("l < null")
        check_invalid("l <= null")
        check_invalid("l > null")
        check_invalid("l >= null")
        check_invalid("l IN null", expected=SyntaxException)
        check_invalid("l IN 367", expected=SyntaxException)
        check_invalid("l CONTAINS KEY 123", expected=SyntaxException)

        # not supported yet
        check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.0')
def list_item_conditional_test(self):
    """CAS conditions on a single list element (l[i] = x): null/negative
    indexes are invalid, out-of-range indexes were invalid before 2.1,
    and a matching element condition lets the delete apply.
    """
    # Lists
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tlist")

        cursor.execute("""
            CREATE TABLE tlist (
                k int PRIMARY KEY,
                l %s,
            )""" % ("frozen<list<text>>" if frozen else "list<text>",))

        cursor.execute("INSERT INTO tlist(k, l) VALUES (0, ['foo', 'bar', 'foobar'])")

        assert_invalid(cursor, "DELETE FROM tlist WHERE k=0 IF l[null] = 'foobar'")
        assert_invalid(cursor, "DELETE FROM tlist WHERE k=0 IF l[-2] = 'foobar'")

        if self.cluster.version() < "2.1":
            # no longer invalid after CASSANDRA-6839
            assert_invalid(cursor, "DELETE FROM tlist WHERE k=0 IF l[3] = 'foobar'")

        # failed conditions echo the current list value
        assert_one(cursor, "DELETE FROM tlist WHERE k=0 IF l[1] = null", [False, ['foo', 'bar', 'foobar']])
        assert_one(cursor, "DELETE FROM tlist WHERE k=0 IF l[1] = 'foobar'", [False, ['foo', 'bar', 'foobar']])
        assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        assert_one(cursor, "DELETE FROM tlist WHERE k=0 IF l[1] = 'bar'", [True])
        assert_none(cursor, "SELECT * FROM tlist")
@since('2.1.1')
def expanded_list_item_conditional_test(self):
    """Extended single-list-element conditions from CASSANDRA-6839:
    <, <=, >, >=, != and IN on l[i], including indexes beyond the end of
    the list (which compare as null), and the invalid null comparisons.
    """
    # expanded functionality from CASSANDRA-6839
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tlist")

        cursor.execute("""
            CREATE TABLE tlist (
                k int PRIMARY KEY,
                l %s
            )""" % ("frozen<list<text>>" if frozen else "list<text>",))

        cursor.execute("INSERT INTO tlist(k, l) VALUES (0, ['foo', 'bar', 'foobar'])")

        def check_applies(condition):
            assert_one(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,), [True])
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        check_applies("l[1] < 'zzz'")
        check_applies("l[1] <= 'bar'")
        check_applies("l[1] > 'aaa'")
        check_applies("l[1] >= 'bar'")
        check_applies("l[1] != 'xxx'")
        check_applies("l[1] != null")
        check_applies("l[1] IN (null, 'xxx', 'bar')")
        check_applies("l[1] > 'aaa' AND l[1] < 'zzz'")

        # check beyond end of list
        check_applies("l[3] = null")
        check_applies("l[3] IN (null, 'xxx', 'bar')")

        def check_does_not_apply(condition):
            assert_one(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,), [False, ['foo', 'bar', 'foobar']])
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        check_does_not_apply("l[1] < 'aaa'")
        check_does_not_apply("l[1] <= 'aaa'")
        check_does_not_apply("l[1] > 'zzz'")
        check_does_not_apply("l[1] >= 'zzz'")
        check_does_not_apply("l[1] != 'bar'")
        check_does_not_apply("l[1] IN (null, 'xxx')")
        check_does_not_apply("l[1] IN ()")
        check_does_not_apply("l[1] != null AND l[1] IN ()")

        # check beyond end of list
        check_does_not_apply("l[3] != null")
        check_does_not_apply("l[3] = 'xxx'")

        def check_invalid(condition, expected=InvalidRequest):
            assert_invalid(cursor, "UPDATE tlist SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (condition,), expected=expected)
            assert_one(cursor, "SELECT * FROM tlist", [0, ['foo', 'bar', 'foobar']])

        check_invalid("l[1] < null")
        check_invalid("l[1] <= null")
        check_invalid("l[1] > null")
        check_invalid("l[1] >= null")
        check_invalid("l[1] IN null", expected=SyntaxException)
        check_invalid("l[1] IN 367", expected=SyntaxException)
        check_invalid("l[1] IN (1, 2, 3)")
        check_invalid("l[1] CONTAINS 367", expected=SyntaxException)
        check_invalid("l[1] CONTAINS KEY 367", expected=SyntaxException)
        check_invalid("l[null] = null")
@since('2.1.1')
def whole_set_conditional_test(self):
    """CAS conditions comparing a whole set column (=, !=, <, <=, >, >=, IN),
    frozen and non-frozen, plus the invalid forms (element access on a set,
    null inside a literal, ...).
    """
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tset")

        cursor.execute("""
            CREATE TABLE tset (
                k int PRIMARY KEY,
                s %s
            )""" % ("frozen<set<text>>" if frozen else "set<text>",))

        cursor.execute("INSERT INTO tset(k, s) VALUES (0, {'bar', 'foo'})")

        def check_applies(condition):
            assert_one(cursor, "UPDATE tset SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (condition,), [True])
            assert_one(cursor, "SELECT * FROM tset", [0, set(['bar', 'foo'])])

        # set literal order is irrelevant for equality
        check_applies("s = {'bar', 'foo'}")
        check_applies("s = {'foo', 'bar'}")
        check_applies("s != {'baz'}")
        check_applies("s > {'a'}")
        check_applies("s >= {'a'}")
        check_applies("s < {'z'}")
        check_applies("s <= {'z'}")
        check_applies("s IN (null, {'bar', 'foo'}, {'a'})")

        # multiple conditions
        check_applies("s > {'a'} AND s < {'z'}")
        check_applies("s IN (null, {'bar', 'foo'}, {'a'}) AND s IN ({'a'}, {'bar', 'foo'}, null)")

        def check_does_not_apply(condition):
            assert_one(cursor, "UPDATE tset SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (condition,),
                       [False, {'bar', 'foo'}])
            assert_one(cursor, "SELECT * FROM tset", [0, {'bar', 'foo'}])

        # should not apply
        check_does_not_apply("s = {'baz'}")
        check_does_not_apply("s != {'bar', 'foo'}")
        check_does_not_apply("s > {'z'}")
        check_does_not_apply("s >= {'z'}")
        check_does_not_apply("s < {'a'}")
        check_does_not_apply("s <= {'a'}")
        check_does_not_apply("s IN ({'a'}, null)")
        check_does_not_apply("s IN ()")
        check_does_not_apply("s != null AND s IN ()")

        def check_invalid(condition, expected=InvalidRequest):
            assert_invalid(cursor, "UPDATE tset SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (condition,), expected=expected)
            assert_one(cursor, "SELECT * FROM tset", [0, {'bar', 'foo'}])

        check_invalid("s = {null}")
        check_invalid("s < null")
        check_invalid("s <= null")
        check_invalid("s > null")
        check_invalid("s >= null")
        check_invalid("s IN null", expected=SyntaxException)
        check_invalid("s IN 367", expected=SyntaxException)
        check_invalid("s CONTAINS KEY 123", expected=SyntaxException)

        # element access is not allow for sets
        check_invalid("s['foo'] = 'foobar'")

        # not supported yet
        check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.1.1')
def whole_map_conditional_test(self):
    """CAS conditions comparing a whole map column (=, !=, <, <=, >, >=, IN),
    frozen and non-frozen, plus the invalid forms (null key/value in a map
    literal, CONTAINS / CONTAINS KEY in conditions).
    """
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tmap")

        cursor.execute("""
            CREATE TABLE tmap (
                k int PRIMARY KEY,
                m %s
            )""" % ("frozen<map<text, text>>" if frozen else "map<text, text>",))

        cursor.execute("INSERT INTO tmap(k, m) VALUES (0, {'foo' : 'bar'})")

        def check_applies(condition):
            assert_one(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), [True])
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        check_applies("m = {'foo': 'bar'}")
        check_applies("m > {'a': 'a'}")
        check_applies("m >= {'a': 'a'}")
        check_applies("m < {'z': 'z'}")
        check_applies("m <= {'z': 'z'}")
        check_applies("m != {'a': 'a'}")
        check_applies("m IN (null, {'a': 'a'}, {'foo': 'bar'})")

        # multiple conditions
        check_applies("m > {'a': 'a'} AND m < {'z': 'z'}")
        check_applies("m != null AND m IN (null, {'a': 'a'}, {'foo': 'bar'})")

        def check_does_not_apply(condition):
            assert_one(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), [False, {'foo': 'bar'}])
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        # should not apply
        check_does_not_apply("m = {'a': 'a'}")
        check_does_not_apply("m > {'z': 'z'}")
        check_does_not_apply("m >= {'z': 'z'}")
        check_does_not_apply("m < {'a': 'a'}")
        check_does_not_apply("m <= {'a': 'a'}")
        check_does_not_apply("m != {'foo': 'bar'}")
        check_does_not_apply("m IN ({'a': 'a'}, null)")
        check_does_not_apply("m IN ()")
        check_does_not_apply("m = null AND m != null")

        def check_invalid(condition, expected=InvalidRequest):
            assert_invalid(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), expected=expected)
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        check_invalid("m = {null: null}")
        check_invalid("m = {'a': null}")
        check_invalid("m = {null: 'a'}")
        check_invalid("m < null")
        check_invalid("m IN null", expected=SyntaxException)

        # not supported yet
        check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
        check_invalid("m CONTAINS KEY 'foo'", expected=SyntaxException)
        check_invalid("m CONTAINS null", expected=SyntaxException)
        check_invalid("m CONTAINS KEY null", expected=SyntaxException)
@since('2.0')
def map_item_conditional_test(self):
    """CAS conditions on a single map entry (m[key] = x): null keys are
    invalid, failed conditions echo the current map, and (> 2.1.1) IN
    conditions against a null map work on non-frozen maps only.
    """
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tmap")

        cursor.execute("""
            CREATE TABLE tmap (
                k int PRIMARY KEY,
                m %s
            )""" % ("frozen<map<text, text>>" if frozen else "map<text, text>",))

        cursor.execute("INSERT INTO tmap(k, m) VALUES (0, {'foo' : 'bar'})")
        assert_invalid(cursor, "DELETE FROM tmap WHERE k=0 IF m[null] = 'foo'")
        assert_one(cursor, "DELETE FROM tmap WHERE k=0 IF m['foo'] = 'foo'", [False, {'foo': 'bar'}])
        assert_one(cursor, "DELETE FROM tmap WHERE k=0 IF m['foo'] = null", [False, {'foo': 'bar'}])
        assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        assert_one(cursor, "DELETE FROM tmap WHERE k=0 IF m['foo'] = 'bar'", [True])
        assert_none(cursor, "SELECT * FROM tmap")

        if self.cluster.version() > "2.1.1":
            cursor.execute("INSERT INTO tmap(k, m) VALUES (1, null)")
            if frozen:
                # element assignment is not possible on a frozen map
                assert_invalid(cursor, "UPDATE tmap set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)")
            else:
                assert_one(cursor, "UPDATE tmap set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)", [True])
@since('2.1.1')
def expanded_map_item_conditional_test(self):
    """Extended single-map-entry conditions from CASSANDRA-6839:
    <, <=, >, >=, != and IN on m[key], including keys not present in the
    map (which compare as null), and the invalid null comparisons.
    """
    # expanded functionality from CASSANDRA-6839
    cursor = self.prepare()

    # frozen collections in conditions are only supported from 2.1.3
    frozen_values = (False, True) if self.cluster.version() >= "2.1.3" else (False,)
    for frozen in frozen_values:

        cursor.execute("DROP TABLE IF EXISTS tmap")

        cursor.execute("""
            CREATE TABLE tmap (
                k int PRIMARY KEY,
                m %s
            )""" % ("frozen<map<text, text>>" if frozen else "map<text, text>",))

        cursor.execute("INSERT INTO tmap(k, m) VALUES (0, {'foo' : 'bar'})")

        def check_applies(condition):
            assert_one(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), [True])
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        check_applies("m['xxx'] = null")
        check_applies("m['foo'] < 'zzz'")
        check_applies("m['foo'] <= 'bar'")
        check_applies("m['foo'] > 'aaa'")
        check_applies("m['foo'] >= 'bar'")
        check_applies("m['foo'] != 'xxx'")
        check_applies("m['foo'] != null")
        check_applies("m['foo'] IN (null, 'xxx', 'bar')")
        check_applies("m['xxx'] IN (null, 'xxx', 'bar')")  # m['xxx'] is not set

        # multiple conditions
        check_applies("m['foo'] < 'zzz' AND m['foo'] > 'aaa'")

        def check_does_not_apply(condition):
            assert_one(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), [False, {'foo': 'bar'}])
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        check_does_not_apply("m['foo'] < 'aaa'")
        check_does_not_apply("m['foo'] <= 'aaa'")
        check_does_not_apply("m['foo'] > 'zzz'")
        check_does_not_apply("m['foo'] >= 'zzz'")
        check_does_not_apply("m['foo'] != 'bar'")
        check_does_not_apply("m['xxx'] != null")  # m['xxx'] is not set
        check_does_not_apply("m['foo'] IN (null, 'xxx')")
        check_does_not_apply("m['foo'] IN ()")
        check_does_not_apply("m['foo'] != null AND m['foo'] = null")

        def check_invalid(condition, expected=InvalidRequest):
            assert_invalid(cursor, "UPDATE tmap SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (condition,), expected=expected)
            assert_one(cursor, "SELECT * FROM tmap", [0, {'foo': 'bar'}])

        check_invalid("m['foo'] < null")
        check_invalid("m['foo'] <= null")
        check_invalid("m['foo'] > null")
        check_invalid("m['foo'] >= null")
        check_invalid("m['foo'] IN null", expected=SyntaxException)
        check_invalid("m['foo'] IN 367", expected=SyntaxException)
        check_invalid("m['foo'] IN (1, 2, 3)")
        check_invalid("m['foo'] CONTAINS 367", expected=SyntaxException)
        check_invalid("m['foo'] CONTAINS KEY 367", expected=SyntaxException)
        check_invalid("m[null] = null")
@since("2.1.1")
def cas_and_list_index_test(self):
""" Test for 7499 test """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v text,
l list<text>
)
""")
cursor.execute("INSERT INTO test(k, v, l) VALUES(0, 'foobar', ['foi', 'bar'])")
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'barfoo'", [False, 'foobar'])
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'foobar'", [True])
assert_one(cursor, "SELECT * FROM test", [0, ['foo', 'bar'], 'foobar'])
@since("2.0")
def static_with_limit_test(self):
""" Test LIMIT when static columns are present (#6956) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
s int static,
v int,
PRIMARY KEY (k, v)
)
""")
cursor.execute("INSERT INTO test(k, s) VALUES(0, 42)")
for i in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES(0, %d)" % i)
assert_one(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 1", [0, 0, 42])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 2", [[0, 0, 42], [0, 1, 42]])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 3", [[0, 0, 42], [0, 1, 42], [0, 2, 42]])
@since("2.0")
def static_with_empty_clustering_test(self):
""" Test for bug of #7455 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
pkey text,
ckey text,
value text,
static_value text static,
PRIMARY KEY(pkey, ckey)
)
""")
cursor.execute("INSERT INTO test(pkey, static_value) VALUES ('partition1', 'static value')")
cursor.execute("INSERT INTO test(pkey, ckey, value) VALUES('partition1', '', 'value')")
assert_one(cursor, "SELECT * FROM test", ['partition1', '', 'static value', 'value'])
@since("1.2")
def limit_compact_table(self):
""" Check for #7052 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k, v)
) WITH COMPACT STORAGE
""")
for i in range(0, 4):
for j in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES (%d, %d)" % (i, j))
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > 0 AND v <= 4 LIMIT 2", [[1], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > -1 AND v <= 4 LIMIT 2", [[0], [1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 2", [[0, 1], [0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > -1 AND v <= 4 LIMIT 2", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 6", [[0, 1], [0, 2], [0, 3], [1, 1], [1, 2], [1, 3]])
# This doesn't work -- see #7059
#assert_all(cursor, "SELECT * FROM test WHERE v > 1 AND v <= 3 LIMIT 6 ALLOW FILTERING", [[1, 2], [1, 3], [0, 2], [0, 3], [2, 2], [2, 3]])
def key_index_with_reverse_clustering(self):
""" Test for #6950 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY ((k1, k2), v)
) WITH CLUSTERING ORDER BY (v DESC)
""")
cursor.execute("CREATE INDEX ON test(k2)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 1)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 1, 2)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 3)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 0, 4)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 1, 5)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 0, 7)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 1, 8)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (3, 0, 1)")
assert_all(cursor, "SELECT * FROM test WHERE k2 = 0 AND v >= 2 ALLOW FILTERING", [[2, 0, 7], [0, 0, 3], [1, 0, 4]])
@since('2.1')
def invalid_custom_timestamp_test(self):
cursor = self.prepare()
# Conditional updates
cursor.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
cursor.execute("BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 0) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 2) IF NOT EXISTS USING TIMESTAMP 1; INSERT INTO test(k, v) VALUES(0, 3) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH USING TIMESTAMP 1 INSERT INTO test(k, v) VALUES(0, 4) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
cursor.execute("INSERT INTO test(k, v) VALUES(1, 0) IF NOT EXISTS")
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES(1, 1) IF NOT EXISTS USING TIMESTAMP 5")
# Counters
cursor.execute("CREATE TABLE counters (k int PRIMARY KEY, c counter)")
cursor.execute("UPDATE counters SET c = c + 1 WHERE k = 0")
assert_invalid(cursor, "UPDATE counters USING TIMESTAMP 10 SET c = c + 1 WHERE k = 0")
cursor.execute("BEGIN COUNTER BATCH UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH UPDATE counters USING TIMESTAMP 3 SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH USING TIMESTAMP 3 UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
@since('2.1')
def add_field_to_udt_test(self):
cursor = self.prepare()
cursor.execute("CREATE TYPE footype (fooint int, fooset set <text>)")
cursor.execute("CREATE TABLE test (key int PRIMARY KEY, data frozen<footype>)")
cursor.execute("INSERT INTO test (key, data) VALUES (1, {fooint: 1, fooset: {'2'}})")
cursor.execute("ALTER TYPE footype ADD foomap map <int,text>")
cursor.execute("INSERT INTO test (key, data) VALUES (1, {fooint: 1, fooset: {'2'}, foomap: {3 : 'bar'}})")
@since('1.2')
def clustering_order_in_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
PRIMARY KEY ((a, b), c)
) with clustering order by (c desc)
""")
cursor.execute("INSERT INTO test (a, b, c) VALUES (1, 2, 3)")
cursor.execute("INSERT INTO test (a, b, c) VALUES (4, 5, 6)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3)", [1, 2, 3])
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3, 4)", [1, 2, 3])
@since('1.2')
def bug7105_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
d int,
PRIMARY KEY (a, b)
)
""")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 2, 3, 3)")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 4, 6, 5)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 ORDER BY b DESC", [1, 2, 3, 3])
@since('2.0')
def conditional_ddl_keyspace_test(self):
cursor = self.prepare(create_keyspace=False)
# try dropping when doesn't exist
cursor.execute("""
DROP KEYSPACE IF EXISTS my_test_ks
""")
# create and confirm
cursor.execute("""
CREATE KEYSPACE IF NOT EXISTS my_test_ks
WITH replication = {'class':'SimpleStrategy', 'replication_factor':1} and durable_writes = true
""")
assert_one(cursor, "select durable_writes from system.schema_keyspaces where keyspace_name = 'my_test_ks';",
[True], cl=ConsistencyLevel.ALL)
# unsuccessful create since it's already there, confirm settings don't change
cursor.execute("""
CREATE KEYSPACE IF NOT EXISTS my_test_ks
WITH replication = {'class':'SimpleStrategy', 'replication_factor':1} and durable_writes = false
""")
assert_one(cursor, "select durable_writes from system.schema_keyspaces where keyspace_name = 'my_test_ks';",
[True], cl=ConsistencyLevel.ALL)
# drop and confirm
cursor.execute("""
DROP KEYSPACE IF EXISTS my_test_ks
""")
assert_none(cursor, "select * from system.schema_keyspaces where keyspace_name = 'my_test_ks'")
@since('2.0')
def conditional_ddl_table_test(self):
cursor = self.prepare(create_keyspace=False)
self.create_ks(cursor, 'my_test_ks', 1)
# try dropping when doesn't exist
cursor.execute("""
DROP TABLE IF EXISTS my_test_table;
""")
# create and confirm
cursor.execute("""
CREATE TABLE IF NOT EXISTS my_test_table (
id text PRIMARY KEY,
value1 blob ) with comment = 'foo';
""")
assert_one(cursor,
"""select comment from system.schema_columnfamilies
where keyspace_name = 'my_test_ks' and columnfamily_name = 'my_test_table'""",
['foo'])
# unsuccessful create since it's already there, confirm settings don't change
cursor.execute("""
CREATE TABLE IF NOT EXISTS my_test_table (
id text PRIMARY KEY,
value2 blob ) with comment = 'bar';
""")
assert_one(cursor,
"""select comment from system.schema_columnfamilies
where keyspace_name = 'my_test_ks' and columnfamily_name = 'my_test_table'""",
['foo'])
# drop and confirm
cursor.execute("""
DROP TABLE IF EXISTS my_test_table;
""")
assert_none(cursor,
"""select * from system.schema_columnfamilies
where keyspace_name = 'my_test_ks' and columnfamily_name = 'my_test_table'""")
@since('2.0')
def conditional_ddl_index_test(self):
cursor = self.prepare(create_keyspace=False)
self.create_ks(cursor, 'my_test_ks', 1)
cursor.execute("""
CREATE TABLE my_test_table (
id text PRIMARY KEY,
value1 blob,
value2 blob) with comment = 'foo';
""")
# try dropping when doesn't exist
cursor.execute("DROP INDEX IF EXISTS myindex")
# create and confirm
cursor.execute("CREATE INDEX IF NOT EXISTS myindex ON my_test_table (value1)")
assert_one(
cursor,
"""select index_name from system."IndexInfo" where table_name = 'my_test_ks'""",
['my_test_table.myindex'])
# unsuccessful create since it's already there
cursor.execute("CREATE INDEX IF NOT EXISTS myindex ON my_test_table (value1)")
# drop and confirm
cursor.execute("DROP INDEX IF EXISTS myindex")
assert_none(cursor, """select index_name from system."IndexInfo" where table_name = 'my_test_ks'""")
@since('2.1')
@freshCluster()
def conditional_ddl_type_test(self):
cursor = self.prepare(create_keyspace=False)
self.create_ks(cursor, 'my_test_ks', 1)
# try dropping when doesn't exist
cursor.execute("DROP TYPE IF EXISTS mytype")
# create and confirm
cursor.execute("CREATE TYPE IF NOT EXISTS mytype (somefield int)")
assert_one(
cursor,
"SELECT type_name from system.schema_usertypes where keyspace_name='my_test_ks' and type_name='mytype'",
['mytype'])
# unsuccessful create since it's already there
# TODO: confirm this create attempt doesn't alter type field from int to blob
cursor.execute("CREATE TYPE IF NOT EXISTS mytype (somefield blob)")
# drop and confirm
cursor.execute("DROP TYPE IF EXISTS mytype")
assert_none(
cursor,
"SELECT type_name from system.schema_usertypes where keyspace_name='my_test_ks' and type_name='mytype'")
@since('2.0')
def bug_6612_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE session_data (
username text,
session_id text,
app_name text,
account text,
last_access timestamp,
created_on timestamp,
PRIMARY KEY (username, session_id, app_name, account)
);
""")
#cursor.execute("create index sessionIndex ON session_data (session_id)")
cursor.execute("create index sessionAppName ON session_data (app_name)")
cursor.execute("create index lastAccessIndex ON session_data (last_access)")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [0])
cursor.execute("insert into session_data (username, session_id, app_name, account, last_access, created_on) values ('toto', 'foo', 'foo', 'bar', 12, 13)")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [1])
@since('2.0')
def blobAs_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
);
""")
# A blob that is not 4 bytes should be rejected
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES (0, blobAsInt(0x01))")
def alter_clustering_and_static_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE foo (bar int, PRIMARY KEY (bar))")
# We shouldn't allow static when there is not clustering columns
assert_invalid(cursor, "ALTER TABLE foo ADD bar2 text static")
def drop_and_readd_collection_test(self):
""" Test for 6276 """
cursor = self.prepare()
cursor.execute("create table test (k int primary key, v set<text>, x int)")
cursor.execute("insert into test (k, v) VALUES (0, {'fffffffff'})")
self.cluster.flush()
cursor.execute("alter table test drop v")
assert_invalid(cursor, "alter table test add v set<int>")
def downgrade_to_compact_bug_test(self):
""" Test for 7744 """
cursor = self.prepare()
cursor.execute("create table test (k int primary key, v set<text>)")
cursor.execute("insert into test (k, v) VALUES (0, {'f'})")
self.cluster.flush()
cursor.execute("alter table test drop v")
cursor.execute("alter table test add v int")
def invalid_string_literals_test(self):
""" Test for CASSANDRA-8101 """
cursor = self.prepare()
assert_invalid(cursor, u"insert into invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
# since the protocol requires strings to be valid UTF-8, the error response to this is a ProtocolError
cursor = self.cql_connection(self.cluster.nodelist()[0], keyspace='ks')
cursor.execute("create table invalid_string_literals (k int primary key, a ascii, b text)")
try:
cursor.execute("insert into invalid_string_literals (k, c) VALUES (0, '\xc2\x01')")
self.fail("Expected error")
except ProtocolException as e:
self.assertTrue("Cannot decode string as UTF8" in str(e))
    def negative_timestamp_test(self):
        """A write with an explicitly negative USING TIMESTAMP must be
        accepted and reported back verbatim by writetime()."""
        cursor = self.prepare()

        cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
        cursor.execute("INSERT INTO test (k, v) VALUES (1, 1) USING TIMESTAMP -42")

        assert_one(cursor, "SELECT writetime(v) FROM TEST WHERE k = 1", [-42])
@since('3.0')
@require("7396")
def select_map_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 0", ['a'])
assert_one(cursor, "SELECT v[5] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'b', 'c'])
assert_one(cursor, "SELECT v[3..5] FROM test WHERE k = 0", ['c', 'd'])
assert_invalid(cursor, "SELECT v[3..1] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[..2] FROM test WHERE k = 0", ['a', 'b'])
assert_one(cursor, "SELECT v[3..] FROM test WHERE k = 0", ['c', 'd'])
assert_one(cursor, "SELECT v[0..] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT v[..5] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('3.0')
@require("7396")
def select_set_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [sortedset(['a', 'b', 'd', 'e'])])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 0", [True])
assert_one(cursor, "SELECT v['c'] FROM test WHERE k = 0", [False])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v['b'..'d'] FROM test WHERE k = 0", ['b', 'd'])
assert_one(cursor, "SELECT v['b'..'e'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_one(cursor, "SELECT v['a'..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['b'..'f'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v['d'..] FROM test WHERE k = 0", ['d', 'e'])
assert_one(cursor, "SELECT v[..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['f'..] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[..'f'] FROM test WHERE k = 0", ['a', 'b', 'd', 'e'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('3.0')
@require("7396")
def select_list_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [['e', 'a', 'd', 'b']])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 0", ['e'])
assert_one(cursor, "SELECT v[3] FROM test WHERE k = 0", ['b'])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 1", [])
assert_invalid(cursor, "SELECT v[-1] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[5] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'd', 'b'])
assert_one(cursor, "SELECT v[0..2] FROM test WHERE k = 0", ['e', 'a', 'd'])
assert_invalid(cursor, "SELECT v[0..4] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[2..0] FROM test WHERE k = 0")
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
    @since('3.0')
    @require("7396")
    def select_map_key_multi_row_test(self):
        """Selecting single map elements (v[key]) and inclusive map slices
        (v[a..b]) across multiple rows (CASSANDRA-7396)."""
        cursor = self.prepare()

        cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
        cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
        cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {1:'a', 2:'b', 5:'e', 6:'f'})")

        # Key 1 exists in both maps.
        assert_all(cursor, "SELECT v[1] FROM test", [['a'], ['a']])
        # NOTE(review): the next two assertions look wrong -- key 5 exists in
        # row k=1 (value 'e') yet empty results are expected, and the query
        # for v[1] is repeated with expectations contradicting the assertion
        # above.  The keys were likely meant to be ones absent from both maps
        # (e.g. v[7], v[8]); confirm against the upstream dtest before relying
        # on this.
        assert_all(cursor, "SELECT v[5] FROM test", [[], []])
        assert_all(cursor, "SELECT v[1] FROM test", [[], []])

        # Inclusive range slices.
        assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'b', 'c'], ['a', 'b', 'e']])
        assert_all(cursor, "SELECT v[3..5] FROM test", [['c', 'd'], ['e']])
        # A reversed range is rejected.
        assert_invalid(cursor, "SELECT v[3..1] FROM test")

        # Open-ended slices.
        assert_all(cursor, "SELECT v[..2] FROM test", [['a', 'b'], ['a', 'b']])
        assert_all(cursor, "SELECT v[3..] FROM test", [['c', 'd'], ['e', 'f']])
        assert_all(cursor, "SELECT v[0..] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e', 'f']])
        assert_all(cursor, "SELECT v[..5] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e']])

        assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('3.0')
@require("7396")
def select_set_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {'c', 'f', 'd', 'b'})")
assert_all(cursor, "SELECT v FROM test", [[sortedset(['b', 'c', 'd', 'f'])], [sortedset(['a', 'b', 'd', 'e'])]])
assert_all(cursor, "SELECT v['a'] FROM test", [[True], [False]])
assert_all(cursor, "SELECT v['c'] FROM test", [[False], [True]])
assert_all(cursor, "SELECT v['b'..'d'] FROM test", [['b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'e'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['a'..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'f'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test")
assert_all(cursor, "SELECT v['d'..] FROM test", [['d', 'e'], ['d', 'f']])
assert_all(cursor, "SELECT v[..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['f'..] FROM test", [[], ['f']])
assert_all(cursor, "SELECT v[..'f'] FROM test", [['a', 'b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('3.0')
@require("7396")
def select_list_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, ['c', 'f', 'd', 'b'])")
assert_all(cursor, "SELECT v FROM test", [[['c', 'f', 'd', 'b']], [['e', 'a', 'd', 'b']]])
assert_all(cursor, "SELECT v[0] FROM test", [['e'], ['c']])
assert_all(cursor, "SELECT v[3] FROM test", [['b'], ['b']])
assert_invalid(cursor, "SELECT v[-1] FROM test")
assert_invalid(cursor, "SELECT v[5] FROM test")
assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'd', 'b'], ['f', 'd', 'b']])
assert_all(cursor, "SELECT v[0..2] FROM test", [['e', 'a', 'd'], ['c', 'f', 'd']])
assert_invalid(cursor, "SELECT v[0..4] FROM test")
assert_invalid(cursor, "SELECT v[2..0] FROM test")
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
    @since('2.1')
    def prepared_statement_invalidation_test(self):
        """Regression test for CASSANDRA-7910: prepared statements must be
        invalidated (and, where possible, transparently re-prepared by the
        driver) when the table schema changes underneath them."""
        cursor = self.prepare()

        cursor.execute("CREATE TABLE test (k int PRIMARY KEY, a int, b int, c int)")
        cursor.execute("INSERT INTO test (k, a, b, c) VALUES (0, 0, 0, 0)")

        wildcard_prepared = cursor.prepare("SELECT * FROM test")
        explicit_prepared = cursor.prepare("SELECT k, a, b, c FROM test")
        result = cursor.execute(wildcard_prepared.bind(None))
        self.assertEqual(result, [(0, 0, 0, 0)])

        cursor.execute("ALTER TABLE test DROP c")
        result = cursor.execute(wildcard_prepared.bind(None))
        # wildcard select can be automatically re-prepared by the driver
        self.assertEqual(result, [(0, 0, 0)])
        # but re-preparing the statement with explicit columns should fail
        # (see PYTHON-207 for why we expect InvalidRequestException instead
        # of the normal exc)
        assert_invalid(cursor, explicit_prepared.bind(None), expected=InvalidRequestException)

        cursor.execute("ALTER TABLE test ADD d int")
        result = cursor.execute(wildcard_prepared.bind(None))
        self.assertEqual(result, [(0, 0, 0, None)])

        explicit_prepared = cursor.prepare("SELECT k, a, b, d FROM test")

        # when the type is altered, both statements will need to be
        # re-prepared by the driver, but the re-preparation should succeed
        cursor.execute("ALTER TABLE test ALTER d TYPE blob")
        result = cursor.execute(wildcard_prepared.bind(None))
        self.assertEqual(result, [(0, 0, 0, None)])
        result = cursor.execute(explicit_prepared.bind(None))
        self.assertEqual(result, [(0, 0, 0, None)])
def bug_8558_test(self):
session = self.prepare()
node1 = self.cluster.nodelist()[0]
session.execute("CREATE KEYSPACE space1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
session.execute("CREATE TABLE space1.table1(a int, b int, c text,primary key(a,b))")
session.execute("INSERT INTO space1.table1(a,b,c) VALUES(1,1,'1')")
node1.nodetool('flush')
session.execute("DELETE FROM space1.table1 where a=1 and b=1")
node1.nodetool('flush')
assert_none(session, "select * from space1.table1 where a=1 and b=1")
    def bug_5732_test(self):
        """Regression test for CASSANDRA-5732: a secondary index must remain
        usable after caching is enabled and the node is restarted."""
        cursor = self.prepare(use_cache=True)

        cursor.execute("""
            CREATE TABLE test (
                k int PRIMARY KEY,
                v int,
            )
        """)

        cursor.execute("ALTER TABLE test WITH CACHING='ALL'")
        cursor.execute("INSERT INTO test (k,v) VALUES (0,0)")
        cursor.execute("INSERT INTO test (k,v) VALUES (1,1)")

        cursor.execute("CREATE INDEX on test(v)")
        assert_all(cursor, "SELECT k FROM test WHERE v = 0", [[0]])

        # Bounce the cluster so the indexed query below is served by a fresh
        # process (exercising cache restore + index load on startup).
        self.cluster.stop()
        time.sleep(0.5)
        self.cluster.start()
        time.sleep(0.5)

        cursor = self.patient_cql_connection(self.cluster.nodelist()[0])
        assert_all(cursor, "SELECT k FROM ks.test WHERE v = 0", [[0]])
| apache-2.0 |
ramadhane/odoo | addons/report_intrastat/__openerp__.py | 261 | 1805 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Intrastat Reporting',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
A module that adds intrastat reports.
=====================================
This module gives the details of the goods traded between the countries of
European Union.""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base', 'product', 'stock', 'sale', 'purchase'],
'data': [
'security/ir.model.access.csv',
'report_intrastat_view.xml',
'intrastat_report.xml',
'report_intrastat_data.xml',
'views/report_intrastatinvoice.xml'
],
'demo': [],
'test': ['test/report_intrastat_report.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
voytekresearch/fakespikes | fakespikes/neurons.py | 1 | 4820 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.random import RandomState
from fakespikes.util import to_spikes
from fakespikes.rates import constant
class Spikes(object):
    """Simulates statistical models of neural spiking.

    Params
    ------
    n : int
        Number of neurons (must be >= 2)
    t : float
        Simulation time (seconds)
    dt : float
        Time-step (seconds); must be <= 0.001
    refractory : float
        Absolute refractory time (seconds); must be an integer multiple of dt
    seed : None, int, RandomState
        The random seed
    private_stdev : float
        Amount of stdev noise to add to each neuron's tuning response
    """

    def __init__(self, n, t, dt=0.001, refractory=0.002, seed=None,
                 private_stdev=0):
        # Ensure reproducible randomness: accept an existing RandomState,
        # an int seed, or fall back to nondeterministic seeding.
        self.seed = seed
        if isinstance(seed, RandomState):
            self.prng = seed
        elif self.seed is not None:
            self.prng = np.random.RandomState(seed)
        else:
            self.prng = np.random.RandomState()

        # Init constraints
        if n < 2:
            raise ValueError("n must be greater than 2")
        if dt > 0.001:
            raise ValueError("dt must be less than 0.001 seconds (1 ms)")
        if not np.allclose(refractory / dt, int(refractory / dt)):
            raise ValueError("refractory must be integer multiple of dt")

        self.n = n
        self.refractory = refractory

        # Timing
        self.dt = dt
        self.t = t
        self.n_steps = int(self.t * (1.0 / self.dt))
        self.times = np.linspace(0, self.t, self.n_steps)

        self.private_stdev = private_stdev
        self.refractory = refractory

        # Pre-draw one uniform sample per (time step, neuron); thresholding
        # these against rate * dt implements Bernoulli-approximate Poisson
        # spiking in poisson() below.  Shape: (n_steps, n).
        self.unifs = np.vstack(
            [self.prng.uniform(0, 1, self.n_steps) for i in range(self.n)]
        ).transpose()

    def _constraints(self, drive):
        # `drive` must be a 1d array aligned with self.times.
        if drive.shape != self.times.shape:
            raise ValueError("Shape of `drive` didn't match times")
        if drive.ndim != 1:
            raise ValueError("`drive` must be 1d")

    def _refractory(self, spks):
        lw = int(self.refractory / self.dt)  # len of refractory window

        # If it spiked at t, delete spikes in the refractory window.
        # NOTE(review): `mask` is an integer 0/1 array used for fancy
        # indexing (selecting columns 0 and 1, not the spiking neurons), and
        # the inner loop always zeroes rows 0..lw-1 regardless of t.  This
        # looks like it was meant to zero rows t+1..t+lw for the neurons
        # that spiked at t; behavior is preserved here as-is -- confirm
        # against the upstream fakespikes implementation before changing.
        for t in range(spks.shape[0]):
            mask = spks[t, :]
            for t_plus in range(lw):
                spks[t_plus, :][mask] = 0

        return spks

    def poisson(self, rates):
        """Simulate Poisson firing.

        Params
        ------
        rates : array-like, 1d, > 0
            The firing rate (Hz) at each time step; same shape as self.times

        Returns
        -------
        ndarray of int, shape (n_steps, n)
            Binary spike matrix (1 = spike), after refractory pruning.
        """
        self._constraints(rates)  # does no harm to check twice

        # No bias unless private_stdev is specified
        biases = np.zeros(self.n)
        if self.private_stdev > 0:
            biases = self.prng.normal(0, self.private_stdev, size=self.n)

        # Poisson method taken from
        # http://www.cns.nyu.edu/~david/handouts/poisson.pdf
        # FIX: use the builtin `int` as the dtype; the `np.int` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24, so the
        # original `np.zeros_like(self.unifs, np.int)` crashes on modern
        # NumPy.
        spikes = np.zeros_like(self.unifs, int)
        for j in range(self.n):
            mask = self.unifs[:, j] <= ((rates + biases[j]) * self.dt)
            spikes[mask, j] = 1

        return self._refractory(spikes)

    def sync_bursts(self, a0, f, k, var=1e-3):
        """Create synchronous bursts of thalamic-ish spikes riding on top of
        a constant-rate Poisson baseline.

        Params
        ------
        a0 : numeric
            Baseline Poisson firing rate (Hz)
        f : numeric
            Oscillation (burst) frequency (Hz)
        k : numeric
            Number of neurons to spike at a time
        var : numeric
            Stdev (seconds) of spike-time jitter within each burst
        """
        if k > self.n:
            raise ValueError("k is larger than N")
        if f < 0:
            raise ValueError("f must be greater then 0")
        if k < 0:
            raise ValueError("k must be greater then 0")

        # Locate about where the pulses of spikes will go, at f,
        wl = 1 / float(f)
        n_pulses = int(self.t * f)
        pulses = []
        t_p = 0
        for _ in range(n_pulses):
            t_p += wl
            # Guard against negative ts (jitter could push early pulses
            # below zero).
            if t_p > (3 * var):
                pulses.append(t_p)

        # and fill in the pulses with Gaussian distributed spikes.
        # FIX: materialize the range as a list; `prng.shuffle` on a `range`
        # object raises TypeError under Python 3 (range is immutable).
        Ns = list(range(self.n))
        ts = []
        ns = []
        for t in pulses:
            ts += list(t + self.prng.normal(0, var, k))

            # Assign spikes to random neurons, at most one spike / neuron
            self.prng.shuffle(Ns)
            ns += list(Ns)[0:k]

        ts = np.array(ts)
        ns = np.array(ns)

        # Just in case any negative time slipped through
        mask = ts > 0
        ts = ts[mask]
        ns = ns[mask]

        spikes = to_spikes(ns, ts, self.t, self.n, self.dt)

        # Create baseline firing and merge, clipping overlaps so every entry
        # stays binary.
        base = self.poisson(constant(self.times, a0))
        spikes = base + spikes
        spikes[spikes > 1] = 1

        return spikes
| mit |
apoelstra/bitcoin | src/crc32c/.ycm_extra_conf.py | 53 | 4374 | # Copyright 2017 The CRC32C Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""YouCompleteMe configuration that interprets a .clang_complete file.
This module implementes the YouCompleteMe configuration API documented at:
https://github.com/Valloric/ycmd#ycm_extra_confpy-specification
The implementation loads and processes a .clang_complete file, documented at:
https://github.com/Rip-Rip/clang_complete/blob/master/README.md
"""
import os
# Flags added to the list in .clang_complete.
BASE_FLAGS = [
    '-Werror',  # Unlike clang_complete, YCM can also be used as a linter.
    '-DUSE_CLANG_COMPLETER',  # YCM needs this.
    '-xc++',  # YCM needs this to avoid compiling headers as C code.
]

# Clang flags that take in paths.
# See https://clang.llvm.org/docs/ClangCommandLineReference.html
PATH_FLAGS = [
    '-isystem',
    '-I',
    '-iquote',
    '--sysroot='
]


def DirectoryOfThisScript():
  """Returns the absolute path to the directory containing this script."""
  return os.path.dirname(os.path.abspath(__file__))


def MakeRelativePathsInFlagsAbsolute(flags, build_root):
  """Expands relative paths in a list of Clang command-line flags.

  Args:
    flags: The list of flags passed to Clang.
    build_root: The current directory when running the Clang compiler. Should
      be an absolute path.

  Returns:
    A list of flags with relative paths replaced by absolute paths.
  """
  absolute_flags = []
  next_is_path = False  # Set when the previous flag was a bare path flag.
  for flag in flags:
    updated = flag
    if next_is_path:
      next_is_path = False
      if not flag.startswith('/'):
        updated = os.path.join(build_root, flag)
    # Check the flag against the path-taking flags.  A bare match ("-I")
    # means the *next* flag is the path; a prefix match ("-Ifoo") means the
    # path is embedded in this flag.
    for prefix in PATH_FLAGS:
      if flag == prefix:
        next_is_path = True
        break
      if flag.startswith(prefix):
        updated = prefix + os.path.join(build_root, flag[len(prefix):])
        break
    if updated:  # Empty strings are dropped.
      absolute_flags.append(updated)
  return absolute_flags
def FindNearest(target, path, build_root):
  """Looks for a file with a specific name closest to a project path.

  This is similar to the logic used by a version-control system (like git)
  to find its configuration directory (.git) based on the current directory
  when a command is invoked.

  Args:
    target: The file name to search for.
    path: The directory where the search starts. The search explores the
      given directory's ascendants via the parent relationship. Should be an
      absolute path.
    build_root: A directory that acts as a fence for the search. If the
      search reaches this directory, it will not advance to its parent.
      Should be an absolute path.

  Returns:
    The path to a file with the desired name. None if the search failed.
  """
  # Iterative walk up the directory tree; stops at build_root or at the
  # filesystem root (where dirname() is a fixed point).
  current = path
  while True:
    candidate = os.path.join(current, target)
    if os.path.isfile(candidate):
      return candidate
    if current == build_root:
      return None
    parent = os.path.dirname(current)
    if parent == current:
      return None
    current = parent
def FlagsForClangComplete(file_path, build_root):
  """Reads the .clang_complete flags for a source file.

  Args:
    file_path: The path to the source file. Should be inside the project.
      Used to locate the relevant .clang_complete file.
    build_root: The current directory when running the Clang compiler for
      this file. Should be an absolute path.

  Returns:
    A list of strings, where each element is a Clang command-line flag.
    None if no .clang_complete file was found.
  """
  clang_complete_path = FindNearest('.clang_complete', file_path, build_root)
  if clang_complete_path is None:
    return None
  # FIX: use a context manager so the file handle is closed deterministically
  # instead of leaking until garbage collection (the original called
  # open(...).read() without ever closing the file).
  with open(clang_complete_path, 'r') as clang_complete_file:
    return clang_complete_file.read().splitlines()
def FlagsForFile(filename, **kwargs):
  """Implements the YouCompleteMe API.

  Returns a dict with a 'flags' key containing the Clang flags for the given
  source file: BASE_FLAGS plus the flags from the nearest .clang_complete
  file, with relative paths expanded against this script's directory.
  """
  # kwargs can be used to pass 'client_data' to the YCM configuration. This
  # configuration script does not need any extra information, so
  # pylint: disable=unused-argument
  build_root = DirectoryOfThisScript()
  file_path = os.path.realpath(filename)

  # FIX: copy BASE_FLAGS before extending.  The original did
  # `flags = BASE_FLAGS` followed by `flags += clang_flags`, which mutates
  # the module-level BASE_FLAGS list in place, so flags from every
  # .clang_complete file ever seen accumulated across calls.
  flags = list(BASE_FLAGS)
  clang_flags = FlagsForClangComplete(file_path, build_root)
  if clang_flags:
    flags += clang_flags

  final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root)
  return {'flags': final_flags}
| mit |
m4ns0ur/grumpy | third_party/stdlib/textwrap.py | 7 | 18245 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Python 2: alias the built-in unicode type so TextWrapper can do
# isinstance() checks; faked with an empty class when the interpreter
# was built without Unicode support.
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist. Fake one.
    class _unicode(object):
        pass
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
#    True, False
#except NameError:
#    (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper(object):
    """
    Object for wrapping/filling text. The public interface consists of
    the wrap() and fill() methods; the other methods are just there for
    subclasses to override in order to tweak the default behaviour.
    If you want to completely replace the main wrapping algorithm,
    you'll probably have to override _wrap_chunks().
    Several instance attributes control various aspects of wrapping:
      width (default: 70)
        the maximum width of wrapped lines (unless break_long_words
        is false)
      initial_indent (default: "")
        string that will be prepended to the first line of wrapped
        output. Counts towards the line's width.
      subsequent_indent (default: "")
        string that will be prepended to all lines save the first
        of wrapped output; also counts towards each line's width.
      expand_tabs (default: true)
        Expand tabs in input text to spaces before further processing.
        Each tab will become 1 .. 8 spaces, depending on its position in
        its line. If false, each tab is treated as a single character.
      replace_whitespace (default: true)
        Replace all whitespace characters in the input text by spaces
        after tab expansion. Note that if expand_tabs is false and
        replace_whitespace is true, every tab will be converted to a
        single space!
      fix_sentence_endings (default: false)
        Ensure that sentence-ending punctuation is always followed
        by two spaces. Off by default because the algorithm is
        (unavoidably) imperfect.
      break_long_words (default: true)
        Break words longer than 'width'. If false, those words will not
        be broken, and some lines might be longer than 'width'.
      break_on_hyphens (default: true)
        Allow breaking hyphenated words. If true, wrapping will occur
        preferably on whitespaces and right after hyphens part of
        compound words.
      drop_whitespace (default: true)
        Drop leading and trailing whitespace from lines.
    """
    # Precomputed stand-in for string.maketrans(_whitespace, ' ' * len(_whitespace)):
    # an identity table for every byte except the _whitespace characters,
    # which all map to a space.
    # whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
    whitespace_trans = '\x00\x01\x02\x03\x04\x05\x06\x07\x08 \x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
    # NOTE(review): indices 9-13 (\t \n \x0b \x0c \r) of the table above should
    # each map to ' ' (i.e. five consecutive spaces between \x08 and \x0e) --
    # confirm none of those spaces were lost in transit.
    # Unicode equivalent: maps the ordinals of _whitespace to ord(' ').
    unicode_whitespace_trans = {}
    uspace = ord(u' ')
    for x in map(ord, _whitespace):
        unicode_whitespace_trans[x] = uspace
    # This funky little regex is just the trick for splitting
    # text up into word-wrappable chunks. E.g.
    # "Hello there -- you goof-ball, use the -b option!"
    # splits into
    # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
    # (after stripping out empty strings).
    wordsep_re = re.compile(
        r'(\s+|' # any whitespace
        r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
    # This less funky little regex just split on recognized spaces. E.g.
    # "Hello there -- you goof-ball, use the -b option!"
    # splits into
    # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
    wordsep_simple_re = re.compile(r'(\s+)')
    # XXX this is not locale- or charset-aware -- string.lowercase
    # is US-ASCII only (and therefore English-only); Python 2 only.
    sentence_end_re = re.compile(r'[%s]' # lowercase letter
                                 r'[\.\!\?]' # sentence-ending punct.
                                 r'[\"\']?' # optional end-of-quote
                                 r'\Z' # end of chunk
                                 % string.lowercase)
    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True):
        """Store the wrapping options and precompile Unicode regex variants."""
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens
        # recompile the regexes for Unicode mode -- done in this clumsy way for
        # backwards compatibility because it's rather common to monkey-patch
        # the TextWrapper class' wordsep_re attribute.
        self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
        self.wordsep_simple_re_uni = re.compile(
            self.wordsep_simple_re.pattern, re.U)
    # -- Private methods -----------------------------------------------
    # (possibly useful for subclasses to override)
    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string
        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
        becomes " foo bar baz".
        """
        if self.expand_tabs:
            # text = text.expandtabs()
            # NOTE(review): unlike str.expandtabs() this ignores tab stops;
            # it simply turns each newline and tab into a single space.
            text = ' '.join((' '.join(text.split('\n'))).split('\t'))
        if self.replace_whitespace:
            # if isinstance(text, str):
            # text = text.translate(self.whitespace_trans)
            # elif isinstance(text, _unicode):
            # text = text.translate(self.unicode_whitespace_trans)
            # NOTE(review): this repeats the same newline/tab normalisation as
            # the branch above; CPython translates *all* whitespace (\x0b,
            # \x0c, \r too). Presumably a Grumpy workaround for a missing
            # str.translate -- confirm.
            text = ' '.join(' '.join(text.split('\n')).split('\t'))
        return text
    def _split(self, text):
        """_split(text : string) -> [string]
        Split the text to wrap into indivisible chunks. Chunks are
        not quite the same as words; see _wrap_chunks() for full
        details. As an example, the text
          Look, goof-ball -- use the -b option!
        breaks into the following chunks:
          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        if break_on_hyphens is True, or in:
          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        otherwise.
        """
        # Pick the byte-string or Unicode variant of the splitting regex.
        if isinstance(text, _unicode):
            if self.break_on_hyphens:
                pat = self.wordsep_re_uni
            else:
                pat = self.wordsep_simple_re_uni
        else:
            if self.break_on_hyphens:
                pat = self.wordsep_re
            else:
                pat = self.wordsep_simple_re
        chunks = pat.split(text)
        # chunks = filter(None, chunks) # remove empty chunks
        # NOTE(review): this keeps empty strings, whereas CPython's
        # filter(None, ...) also drops them -- confirm downstream code
        # tolerates '' chunks.
        chunks = [x for x in chunks if x is not None]
        return chunks
    def _fix_sentence_endings(self, chunks):
        """_fix_sentence_endings(chunks : [string])
        Correct for sentence endings buried in 'chunks'. Eg. when the
        original text contains "... foo.\\nBar ...", munge_whitespace()
        and split() will convert that to [..., "foo.", " ", "Bar", ...]
        which has one too few spaces; this method simply changes the one
        space to two.
        """
        i = 0
        patsearch = self.sentence_end_re.search
        while i < len(chunks)-1:
            if chunks[i+1] == " " and patsearch(chunks[i]):
                # NOTE(review): CPython assigns two spaces ("  ") here; if
                # this literal really is a single space the method is a
                # no-op -- confirm against upstream textwrap.
                chunks[i+1] = " "
                i += 2
            else:
                i += 1
    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)
        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.
        """
        # Figure out when indent is larger than the specified width, and make
        # sure at least one character is stripped off on every pass
        if width < 1:
            space_left = 1
        else:
            space_left = width - cur_len
        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
        # Otherwise, we have to preserve the long word intact. Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing. Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.
    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]
        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less. (If 'break_long_words' is false,
        some lines may be longer than this.) Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks. Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
        chunks.reverse()
        while chunks:
            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0
            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Maximum width for this line.
            width = self.width - len(indent)
            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                # del chunks[-1]
                chunks.pop()
            while chunks:
                l = len(chunks[-1])
                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                # Nope, this line is full.
                else:
                    break
            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                # del cur_line[-1]
                cur_line.pop()
            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
    # -- Public interface ----------------------------------------------
    def wrap(self, text):
        """wrap(text : string) -> [string]
        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines. Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.
        """
        text = self._munge_whitespace(text)
        chunks = self._split(text)
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)
    def fill(self, text):
        """fill(text : string) -> string
        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph.
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Convenience front-end: instantiates a TextWrapper configured with
    *width* plus any extra keyword arguments (see the TextWrapper class
    for the available options) and delegates to its wrap() method.  By
    default tabs are expanded and other whitespace (including newline)
    is converted to space.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Same options as wrap() -- a TextWrapper is built from *width* and any
    extra keyword arguments -- but the wrapped lines are joined back into
    one newline-separated string instead of being returned as a list.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.fill(text)
# -- Loosely related functionality -------------------------------------
# Matches lines containing only spaces/tabs; these are blanked out before
# computing the common margin in dedent().
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
# Captures the leading-whitespace prefix of every line that has visible
# (non-whitespace, non-newline) content.
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    Lets triple-quoted strings be indented to match the surrounding code
    while displaying flush-left.  Tabs and spaces are both treated as
    whitespace but are never considered equal: the lines "  hello" and
    "\\thello" share no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Lines consisting solely of blanks must not influence the margin.
    text = re.sub('^[ \t]+$', '', text, flags=re.MULTILINE)
    # Leading-whitespace prefix of every line that has visible content.
    indents = re.findall('(^[ \t]*)(?:[^ \t\n])', text, re.MULTILINE)

    margin = None
    for indent in indents:
        if margin is None:
            margin = indent
        elif indent.startswith(margin):
            # Line indented at least as deeply as the current margin:
            # the previous winner stands.
            pass
        elif margin.startswith(indent):
            # Shallower but consistent line: it becomes the new margin.
            margin = indent
        else:
            # Inconsistent indents: keep only the longest shared prefix.
            common = 0
            for left, right in zip(margin, indent):
                if left != right:
                    break
                common += 1
            margin = margin[:common]

    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
if __name__ == "__main__":
    # Ad-hoc manual smoke test (Python 2 print statements).
    #print dedent("\tfoo\n\tbar")
    #print dedent(" \thello there\n \t how are you?")
    print dedent("Hello there.\n This is indented.")
| apache-2.0 |
Lyrositor/CoilSnake | coilsnake/ui/cli.py | 2 | 2095 | #! /usr/bin/env python
import argparse
import logging
from coilsnake.ui.common import compile_project, decompile_rom, upgrade_project, setup_logging
from coilsnake.ui.information import coilsnake_about
logger = logging.getLogger(__name__)
def main():
    """Parse the command line and dispatch to the chosen CoilSnake sub-command."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action="store_true",
                        help="increase output verbosity")
    parser.add_argument("--quiet", action="store_true",
                        help="silence all output")
    subparsers = parser.add_subparsers()

    # coilsnake compile <project> <base rom> <output rom>
    compile_parser = subparsers.add_parser("compile", help="compile from project to rom")
    compile_parser.add_argument("project_directory")
    compile_parser.add_argument("base_rom")
    compile_parser.add_argument("output_rom")
    compile_parser.set_defaults(func=_compile)

    # coilsnake decompile <rom> <project>
    decompile_parser = subparsers.add_parser("decompile", help="decompile from rom to project")
    decompile_parser.add_argument("rom")
    decompile_parser.add_argument("project_directory")
    decompile_parser.set_defaults(func=_decompile)

    # coilsnake upgrade <base rom> <project>
    upgrade_parser = subparsers.add_parser(
        "upgrade",
        help="upgrade a project which was created by an older version of CoilSnake")
    upgrade_parser.add_argument("base_rom")
    upgrade_parser.add_argument("project_directory")
    upgrade_parser.set_defaults(func=_upgrade)

    version_parser = subparsers.add_parser("version", help="display version information")
    version_parser.set_defaults(func=_version)

    args = parser.parse_args()
    setup_logging(quiet=args.quiet, verbose=args.verbose)
    args.func(args)
def _compile(args):
    """Handler for the ``compile`` sub-command: build a ROM from a project."""
    compile_project(project_path=args.project_directory,
                    base_rom_filename=args.base_rom,
                    output_rom_filename=args.output_rom)
def _decompile(args):
    """Handler for the ``decompile`` sub-command: extract a project from a ROM."""
    decompile_rom(rom_filename=args.rom,
                  project_path=args.project_directory)
def _upgrade(args):
    """Handler for the ``upgrade`` sub-command: migrate an old-format project."""
    upgrade_project(base_rom_filename=args.base_rom,
                    project_path=args.project_directory)
def _version(args):
print coilsnake_about() | gpl-3.0 |
mattclark/osf.io | osf/models/banner.py | 16 | 1944 | from django.db import models
from datetime import datetime
from osf.utils.storage import BannerImageStorage
from osf.exceptions import ValidationValueError
from osf.utils.fields import NonNaiveDateTimeField
def validate_banner_dates(banner_id, start_date, end_date):
    """Validate a banner's schedule: dates must be ordered and must not
    overlap any other ScheduledBanner's window.

    Raises ValidationValueError on an inverted or conflicting range.
    """
    if start_date > end_date:
        raise ValidationValueError('Start date must be before end date.')

    # A conflict exists when another banner starts inside this window,
    # ends inside it, or completely surrounds it.
    starts_inside = models.Q(start_date__gte=start_date) & models.Q(start_date__lte=end_date)
    ends_inside = models.Q(end_date__gte=start_date) & models.Q(end_date__lte=end_date)
    surrounds = models.Q(start_date__lte=start_date) & models.Q(end_date__gte=end_date)

    conflict = ScheduledBanner.objects.filter(
        starts_inside | ends_inside | surrounds
    ).exclude(id=banner_id).exists()
    if conflict:
        raise ValidationValueError('Banners dates cannot be overlapping.')
class ScheduledBanner(models.Model):
    """A site banner scheduled for display between start_date and end_date."""

    class Meta:
        # Custom permissions for use in the OSF Admin App
        permissions = (
            ('view_scheduledbanner', 'Can view scheduled banner details'),
        )

    name = models.CharField(unique=True, max_length=256)
    # Whole-day bounds; save() snaps these to midnight / end of day.
    start_date = NonNaiveDateTimeField()
    end_date = NonNaiveDateTimeField()
    # 7 chars fits a hex color string, e.g. '#ff0000'.
    color = models.CharField(max_length=7)
    license = models.CharField(blank=True, null=True, max_length=256)
    link = models.URLField(blank=True, default='https://www.crowdrise.com/centerforopenscience')
    default_photo = models.FileField(storage=BannerImageStorage())
    default_alt_text = models.TextField()
    mobile_photo = models.FileField(storage=BannerImageStorage())
    mobile_alt_text = models.TextField(blank=True, null=True)

    def save(self, *args, **kwargs):
        """Expand the schedule to full-day bounds, validate, then persist."""
        # combine() accepts the stored value (date or datetime, since
        # datetime subclasses date) and pins the time-of-day component.
        self.start_date = datetime.combine(self.start_date, datetime.min.time())
        self.end_date = datetime.combine(self.end_date, datetime.max.time())
        # Raises ValidationValueError on inverted or overlapping ranges.
        validate_banner_dates(self.id, self.start_date, self.end_date)
        super(ScheduledBanner, self).save(*args, **kwargs)
| apache-2.0 |
djeo94/CouchPotatoServer | libs/requests/packages/urllib3/util/connection.py | 679 | 3293 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:
        # Platform-specific: AppEngine exposes no raw socket at all.
        return False
    if sock is None:
        # Connection already closed (such as by httplib).
        return True

    if not poll:
        if not select:
            # Platform-specific: AppEngine -- no way to check, assume alive.
            return False
        try:
            return select([sock], [], [], 0.0)[0]
        except socket.error:
            return True

    # Preferred path on platforms that support poll(): scales better.
    poller = poll()
    poller.register(sock, POLLIN)
    for descriptor, _event in poller.poll(0.0):
        if descriptor == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    err = None
    # Try each resolved address family/socktype in order until one connects.
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            # This is the only addition urllib3 makes to this function.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except socket.error as _:
            # Remember the most recent failure; re-raised after the loop
            # if every candidate address fails.
            err = _
            if sock is not None:
                sock.close()
                sock = None

    if err is not None:
        raise err
    else:
        # getaddrinfo returned no results at all for this host/port.
        raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
| gpl-3.0 |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/django/conf/locale/hu/formats.py | 504 | 1117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Hungarian (hu) locale date/time and number formatting for Django.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%Y.%m.%d.', # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%Y.%m.%d. %H.%M.%S', # '2006.10.25. 14.30.59'
    '%Y.%m.%d. %H.%M.%S.%f', # '2006.10.25. 14.30.59.000200'
    '%Y.%m.%d. %H.%M', # '2006.10.25. 14.30'
    '%Y.%m.%d.', # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
# NOTE(review): the separator literal should be U+00A0 (non-breaking space);
# confirm it has not been converted to a plain ASCII space.
THOUSAND_SEPARATOR = ' ' # Non-breaking space
NUMBER_GROUPING = 3
| mit |
cchurch/ansible | lib/ansible/modules/cloud/vmware/vmware_vswitch_facts.py | 2 | 5209 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vswitch_facts
short_description: Gathers facts about an ESXi host's vswitch configurations
description:
- This module can be used to gather facts about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
- Other tools like esxcli might show the number of ports as 1536 or 5632.
- See U(https://kb.vmware.com/s/article/2064511) for more details.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Facts about vswitch belonging to every ESXi host systems under this cluster will be returned.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vswitch facts about all ESXi Host in given Cluster
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_hosts_vswitch_facts
- name: Gather firewall facts about ESXi Host
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: all_vswitch_facts
'''
RETURN = r'''
hosts_vswitch_facts:
description: metadata about host's vswitch configuration
returned: on success
type: dict
sample: {
"10.76.33.218": {
"vSwitch0": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic0"
]
},
"vSwitch_0011": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic2",
"vmnic1"
]
},
},
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VswitchFactsManager(PyVmomi):
    """Class to gather vSwitch facts for ESXi hosts via pyVmomi."""

    def __init__(self, module):
        super(VswitchFactsManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # Resolve the target ESXi host objects from either selector.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    @staticmethod
    def serialize_pnics(vswitch_obj):
        """Get pnic names attached to a vSwitch."""
        pnics = []
        for pnic in vswitch_obj.pnic:
            # vSwitch contains all PNICs as string in format of 'key-vim.host.PhysicalNic-vmnic0',
            # so keep only the trailing device name (e.g. 'vmnic0').
            pnics.append(pnic.split("-", 3)[-1])
        return pnics

    def gather_vswitch_facts(self):
        """Gather vSwitch facts.

        Returns a mapping: host name -> {vswitch name -> dict(pnics, mtu,
        num_ports)}.
        """
        hosts_vswitch_facts = dict()
        for host in self.hosts:
            network_manager = host.configManager.networkSystem
            if network_manager:
                temp_switch_dict = dict()
                for available_vswitch in network_manager.networkInfo.vswitch:
                    temp_switch_dict[available_vswitch.name] = dict(
                        pnics=self.serialize_pnics(available_vswitch),
                        mtu=available_vswitch.mtu,
                        # we need to use the spec to get the ports
                        # otherwise, the output might be different compared to the vswitch config module
                        # (e.g. 5632 ports instead of 128)
                        num_ports=available_vswitch.spec.numPorts
                    )
                hosts_vswitch_facts[host.name] = temp_switch_dict
        return hosts_vswitch_facts
def main():
    """Module entry point: gather vSwitch facts and exit via AnsibleModule."""
    # Standard VMware connection arguments plus this module's own options.
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        # At least one of the two target selectors must be supplied.
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        # Fact gathering never changes state, so check mode is safe.
        supports_check_mode=True
    )
    vmware_vswitch_mgr = VswitchFactsManager(module)
    module.exit_json(changed=False, hosts_vswitch_facts=vmware_vswitch_mgr.gather_vswitch_facts())
| gpl-3.0 |
HKUST-SING/tensorflow | tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py | 130 | 5863 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data in a constant.
Run using bazel:
bazel run --config opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
  """Train MNIST for a number of epochs.

  Builds the full TF1 pipeline (preloaded constant data -> input queue ->
  model -> train op), then loops until the input producer raises
  OutOfRangeError after FLAGS.num_epochs epochs.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    with tf.name_scope('input'):
      # Input data, pin to CPU because rest of pipeline is CPU-only
      with tf.device('/cpu:0'):
        input_images = tf.constant(data_sets.train.images)
        input_labels = tf.constant(data_sets.train.labels)

        image, label = tf.train.slice_input_producer(
            [input_images, input_labels], num_epochs=FLAGS.num_epochs)
        label = tf.cast(label, tf.int32)
        images, labels = tf.train.batch(
            [image, label], batch_size=FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    # (eval_correct is not used further in this function.)
    eval_correct = mnist.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create the op for initializing variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    sess.run(init_op)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # And then after everything is built, start the training loop.
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()

        # Run one step of the model.
        _, loss_value = sess.run([train_op, loss])

        duration = time.time() - start_time

        # Write the summaries and print an overview fairly often.
        if step % 100 == 0:
          # Print status to stdout.
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                     duration))
          # Update the events file.
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
          # NOTE(review): combined with the increment at the bottom of the
          # loop, every 100th iteration advances `step` by two -- confirm
          # this quirk is intentional.
          step += 1

        # Save a checkpoint periodically.
        if (step + 1) % 1000 == 0:
          print('Saving')
          saver.save(sess, FLAGS.train_dir, global_step=step)

        step += 1
    except tf.errors.OutOfRangeError:
      # Raised by the input producer once num_epochs epochs are exhausted.
      print('Saving')
      saver.save(sess, FLAGS.train_dir, global_step=step)
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()

    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def main(_):
  """Entry point for tf.app.run; the ignored argument is argv."""
  run_training()
if __name__ == '__main__':
  # Define the command-line flags; unrecognized args are forwarded to
  # tf.app.run so TensorFlow's own flags still work.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--num_epochs',
      type=int,
      default=2,
      help='Number of epochs to run trainer.'
  )
  parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.'
  )
  parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size. Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/data',
      help='Directory to put the training data.'
  )
  parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true'
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
vicky2135/lucious | lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Collapse runs of whitespace in the token stream, except inside
    whitespace-preserving elements (``pre``, ``textarea`` and the rcdata
    elements)."""

    # Elements whose text content must be passed through untouched.
    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth inside whitespace-preserving elements; whitespace is
        # only rewritten while this is zero.
        depth = 0
        for token in base.Filter.__iter__(self):
            kind = token["type"]
            if kind == "StartTag" and (
                    depth or token["name"] in self.spacePreserveElements):
                depth += 1
            elif kind == "EndTag" and depth:
                depth -= 1
            elif not depth and kind == "SpaceCharacters" and token["data"]:
                # Keep a single space so word separation survives collapsing.
                token["data"] = " "
            elif not depth and kind == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Collapse every run of HTML space characters in *text* to one ' '."""
    return SPACES_REGEX.sub(' ', text)
| bsd-3-clause |
jggtrujillo/or-tools | examples/python/set_covering_deployment.py | 34 | 4144 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set covering deployment in Google CP Solver
From http://mathworld.wolfram.com/SetCoveringDeployment.html
'''
Set covering deployment (sometimes written 'set-covering deployment'
and abbreviated SCDP for 'set covering deployment problem') seeks
an optimal stationing of troops in a set of regions so that a
relatively small number of troop units can control a large
geographic region. ReVelle and Rosing (2000) first described
this in a study of Emperor Constantine the Great's mobile field
army placements to secure the Roman Empire.
'''
Compare with the the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering_deployment.mzn
* Comet : http://www.hakank.org/comet/set_covering_deployment.co
* Gecode : http://www.hakank.org/gecode/set_covering_deployment.cpp
* ECLiPSe : http://www.hakank.org/eclipse/set_covering_deployment.ecl
* SICStus : http://hakank.org/sicstus/set_covering_deployment.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
    """Build and solve the set covering deployment model.

    Python 2 module (print statements). Minimizes the total number of
    armies (X = field army, Y = reserve army) subject to the coverage
    constraints described in the module docstring.
    """
    # Create the solver.
    solver = pywrapcp.Solver("Set covering deployment")

    #
    # data
    #
    countries = ["Alexandria",
                 "Asia Minor",
                 "Britain",
                 "Byzantium",
                 "Gaul",
                 "Iberia",
                 "Rome",
                 "Tunis"]
    n = len(countries)

    # the incidence matrix (neighbours)
    mat = [
        [0, 1, 0, 1, 0, 0, 1, 1],
        [1, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 0, 0],
        [1, 1, 0, 0, 0, 0, 1, 0],
        [0, 0, 1, 0, 0, 1, 1, 0],
        [0, 0, 1, 0, 1, 0, 1, 1],
        [1, 0, 0, 1, 1, 1, 0, 1],
        [1, 0, 0, 0, 0, 1, 1, 0]
    ]

    #
    # declare variables
    #
    # First army
    X = [solver.IntVar(0, 1, "X[%i]" % i) for i in range(n)]
    # Second (reserv) army
    Y = [solver.IntVar(0, 1, "Y[%i]" % i) for i in range(n)]

    #
    # constraints
    #
    # total number of armies
    num_armies = solver.Sum([X[i] + Y[i] for i in range(n)])

    #
    # Constraint 1: There is always an army in a city
    #               (+ maybe a backup)
    #               Or rather: Is there a backup, there
    #               must be an an army
    #
    [solver.Add(X[i] >= Y[i]) for i in range(n)]

    #
    # Constraint 2: There should always be an backup army near every city
    #
    for i in range(n):
        neighbors = solver.Sum([Y[j] for j in range(n) if mat[i][j] == 1])
        solver.Add(X[i] + neighbors >= 1)

    objective = solver.Minimize(num_armies, 1)

    #
    # solution and search
    #
    solution = solver.Assignment()
    solution.Add(X)
    solution.Add(Y)
    solution.Add(num_armies)
    solution.AddObjective(num_armies)

    collector = solver.LastSolutionCollector(solution)
    solver.Solve(solver.Phase(X + Y,
                              solver.INT_VAR_DEFAULT,
                              solver.INT_VALUE_DEFAULT),
                 [collector, objective])

    print "num_armies:", collector.ObjectiveValue(0)
    print "X:", [collector.Value(0, X[i]) for i in range(n)]
    print "Y:", [collector.Value(0, Y[i]) for i in range(n)]

    for i in range(n):
        if collector.Value(0, X[i]) == 1:
            print "army:", countries[i],
        if collector.Value(0, Y[i]) == 1:
            print "reserv army:", countries[i], " "
        print

    print
    print "failures:", solver.Failures()
    print "branches:", solver.Branches()
    print "WallTime:", solver.WallTime()
if __name__ == "__main__":
    # Script entry point.
    main()
| apache-2.0 |
isaac-s/cloudify-manager-blueprints | components/manager-ip-setter/scripts/create.py | 2 | 2545 | #!/usr/bin/env python
import os
from os.path import join, dirname
import tempfile
from cloudify import ctx
# Fetch the shared utils module next to this script so it can be imported.
ctx.download_resource(
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA

SERVICE_NAME = 'manager-ip-setter'

runtime_props = ctx.instance.runtime_properties
ctx_properties = utils.ctx_factory.create(SERVICE_NAME)

# Installation directory for this service; registered for removal on teardown.
MANAGER_IP_SETTER_DIR = join('/opt/cloudify', SERVICE_NAME)
runtime_props['files_to_remove'] = [MANAGER_IP_SETTER_DIR]
runtime_props['service_name'] = SERVICE_NAME
def deploy_utils():
    """Render utils.py into the manager-ip-setter dir with restricted perms."""
    tmp_path = join(tempfile.gettempdir(), 'utils.py')
    ctx.download_resource_and_render(join('components', 'utils.py'), tmp_path)
    destination = join(MANAGER_IP_SETTER_DIR, 'utils.py')
    utils.move(tmp_path, destination)
    # Read/execute for owner and group only; owned by root:<cloudify group>.
    utils.chmod('550', destination)
    utils.chown('root', utils.CLOUDIFY_GROUP, destination)
def create_cloudify_user():
    """Create the cloudify service user/group and its home directory."""
    utils.create_service_user(
        user=utils.CLOUDIFY_USER,
        group=utils.CLOUDIFY_GROUP,
        home=utils.CLOUDIFY_HOME_DIR
    )
    utils.mkdir(utils.CLOUDIFY_HOME_DIR)
def create_sudoers_file_and_disable_sudo_requiretty():
    """Create the cloudify sudoers file and drop `requiretty` for the user."""
    utils.sudo(['touch', utils.CLOUDIFY_SUDOERS_FILE])
    utils.chmod('440', utils.CLOUDIFY_SUDOERS_FILE)
    # Allow the cloudify user to run sudo without a controlling terminal.
    entry = 'Defaults:{user} !requiretty'.format(user=utils.CLOUDIFY_USER)
    description = 'Disable sudo requiretty for {0}'.format(utils.CLOUDIFY_USER)
    utils.add_entry_to_sudoers(entry, description)
def deploy_sudo_scripts():
    """Install the sudo-wrapped helper scripts used by this service."""
    descriptions = {
        'manager-ip-setter.sh': 'Run manager IP setter script',
        'update-provider-context.py': 'Run update provider context script',
        'create-internal-ssl-certs.py':
            'Run the scripts that recreates internal SSL certs',
    }
    for script_name, description in descriptions.items():
        utils.deploy_sudo_command_script(script_name, description, SERVICE_NAME)
def install_manager_ip_setter():
    """Deploy the manager-ip-setter service files and register with systemd."""
    utils.mkdir(MANAGER_IP_SETTER_DIR)
    utils.set_service_as_cloudify_service(runtime_props)
    deploy_utils()
    deploy_sudo_scripts()
    utils.systemd.configure(SERVICE_NAME)
def init_cloudify_user():
    """Create the cloudify user and its sudoers configuration."""
    create_cloudify_user()
    create_sudoers_file_and_disable_sudo_requiretty()
# Always create the cloudify user, but only install the scripts if flag is true
init_cloudify_user()
# os.environ.get() returns None when the variable is unset, which previously
# crashed on .lower() with AttributeError; default to '' so an absent flag
# simply means "disabled".
if os.environ.get('set_manager_ip_on_boot', '').lower() == 'true':
    install_manager_ip_setter()
else:
    ctx.logger.info('Set manager ip on boot is disabled.')
| apache-2.0 |
code4futuredotorg/reeborg_tw | src/libraries/brython_old/Lib/unittest/test/testmock/testmagicmethods.py | 737 | 12145 | import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
    """Tests for configuring and deleting magic methods on Mock/MagicMock.

    Plain Mock starts with no magic methods; MagicMock preconfigures the
    full set listed in ``_magics``.
    """

    def test_deleting_magic_methods(self):
        mock = Mock()
        self.assertFalse(hasattr(mock, '__getitem__'))

        mock.__getitem__ = Mock()
        self.assertTrue(hasattr(mock, '__getitem__'))

        del mock.__getitem__
        self.assertFalse(hasattr(mock, '__getitem__'))

    def test_magicmock_del(self):
        mock = MagicMock()
        # before using getitem
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])

        mock = MagicMock()
        # this time use it first
        mock['foo']
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])

    def test_magic_method_wrapping(self):
        mock = Mock()
        def f(self, name):
            return self, 'fish'

        mock.__getitem__ = f
        # assigned function is wrapped, not stored as-is
        self.assertFalse(mock.__getitem__ is f)
        self.assertEqual(mock['foo'], (mock, 'fish'))
        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))

        mock.__getitem__ = mock
        self.assertTrue(mock.__getitem__ is mock)

    def test_magic_methods_isolated_between_mocks(self):
        mock1 = Mock()
        mock2 = Mock()

        mock1.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock1), [])
        self.assertRaises(TypeError, lambda: list(mock2))

    def test_repr(self):
        mock = Mock()
        self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
        mock.__repr__ = lambda s: 'foo'
        self.assertEqual(repr(mock), 'foo')

    def test_str(self):
        mock = Mock()
        self.assertEqual(str(mock), object.__str__(mock))
        mock.__str__ = lambda s: 'foo'
        self.assertEqual(str(mock), 'foo')

    def test_dict_methods(self):
        mock = Mock()

        self.assertRaises(TypeError, lambda: mock['foo'])
        def _del():
            del mock['foo']
        def _set():
            mock['foo'] = 3
        self.assertRaises(TypeError, _del)
        self.assertRaises(TypeError, _set)

        _dict = {}
        def getitem(s, name):
            return _dict[name]
        def setitem(s, name, value):
            _dict[name] = value
        def delitem(s, name):
            del _dict[name]

        mock.__setitem__ = setitem
        mock.__getitem__ = getitem
        mock.__delitem__ = delitem

        self.assertRaises(KeyError, lambda: mock['foo'])
        mock['foo'] = 'bar'
        self.assertEqual(_dict, {'foo': 'bar'})
        self.assertEqual(mock['foo'], 'bar')
        del mock['foo']
        self.assertEqual(_dict, {})

    def test_numeric(self):
        original = mock = Mock()
        mock.value = 0

        self.assertRaises(TypeError, lambda: mock + 3)

        def add(self, other):
            mock.value += other
            return self
        mock.__add__ = add
        self.assertEqual(mock + 3, mock)
        self.assertEqual(mock.value, 3)

        del mock.__add__
        def iadd(mock):
            mock += 3
        self.assertRaises(TypeError, iadd, mock)
        mock.__iadd__ = add
        mock += 6
        self.assertEqual(mock, original)
        self.assertEqual(mock.value, 9)

        self.assertRaises(TypeError, lambda: 3 + mock)
        mock.__radd__ = add
        self.assertEqual(7 + mock, mock)
        self.assertEqual(mock.value, 16)

    def test_hash(self):
        mock = Mock()
        # test delegation
        self.assertEqual(hash(mock), Mock.__hash__(mock))

        def _hash(s):
            return 3
        mock.__hash__ = _hash
        self.assertEqual(hash(mock), 3)

    def test_nonzero(self):
        m = Mock()
        self.assertTrue(bool(m))

        m.__bool__ = lambda s: False
        self.assertFalse(bool(m))

    def test_comparison(self):
        mock = Mock()
        def comp(s, o):
            return True
        mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self. assertTrue(mock < 3)
        self. assertTrue(mock > 3)
        self. assertTrue(mock <= 3)
        self. assertTrue(mock >= 3)

        # ordering against unconfigured mocks/objects is unsupported
        self.assertRaises(TypeError, lambda: MagicMock() < object())
        self.assertRaises(TypeError, lambda: object() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > object())
        self.assertRaises(TypeError, lambda: object() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= object())
        self.assertRaises(TypeError, lambda: object() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= object())
        self.assertRaises(TypeError, lambda: object() >= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())

    def test_equality(self):
        for mock in Mock(), MagicMock():
            self.assertEqual(mock == mock, True)
            self.assertIsInstance(mock == mock, bool)
            self.assertEqual(mock != mock, False)
            self.assertIsInstance(mock != mock, bool)
            self.assertEqual(mock == object(), False)
            self.assertEqual(mock != object(), True)

            def eq(self, other):
                return other == 3
            mock.__eq__ = eq
            self.assertTrue(mock == 3)
            self.assertFalse(mock == 4)

            def ne(self, other):
                return other == 3
            mock.__ne__ = ne
            self.assertTrue(mock != 3)
            self.assertFalse(mock != 4)

        mock = MagicMock()
        mock.__eq__.return_value = True
        self.assertIsInstance(mock == 3, bool)
        self.assertEqual(mock == 3, True)

        mock.__ne__.return_value = False
        self.assertIsInstance(mock != 3, bool)
        self.assertEqual(mock != 3, False)

    def test_len_contains_iter(self):
        mock = Mock()

        self.assertRaises(TypeError, len, mock)
        self.assertRaises(TypeError, iter, mock)
        self.assertRaises(TypeError, lambda: 'foo' in mock)

        mock.__len__ = lambda s: 6
        self.assertEqual(len(mock), 6)

        mock.__contains__ = lambda s, o: o == 3
        self.assertTrue(3 in mock)
        self.assertFalse(6 in mock)

        mock.__iter__ = lambda s: iter('foobarbaz')
        self.assertEqual(list(mock), list('foobarbaz'))

    def test_magicmock(self):
        mock = MagicMock()

        mock.__iter__.return_value = iter([1, 2, 3])
        self.assertEqual(list(mock), [1, 2, 3])

        getattr(mock, '__bool__').return_value = False
        self.assertFalse(hasattr(mock, '__nonzero__'))
        self.assertFalse(bool(mock))

        for entry in _magics:
            self.assertTrue(hasattr(mock, entry))
        self.assertFalse(hasattr(mock, '__imaginery__'))

    def test_magic_mock_equality(self):
        mock = MagicMock()
        self.assertIsInstance(mock == object(), bool)
        self.assertIsInstance(mock != object(), bool)

        self.assertEqual(mock == object(), False)
        self.assertEqual(mock != object(), True)
        self.assertEqual(mock == mock, True)
        self.assertEqual(mock != mock, False)

    def test_magicmock_defaults(self):
        mock = MagicMock()
        self.assertEqual(int(mock), 1)
        self.assertEqual(complex(mock), 1j)
        self.assertEqual(float(mock), 1.0)
        self.assertNotIn(object(), mock)
        self.assertEqual(len(mock), 0)
        self.assertEqual(list(mock), [])
        self.assertEqual(hash(mock), object.__hash__(mock))
        self.assertEqual(str(mock), object.__str__(mock))
        self.assertTrue(bool(mock))

        # in Python 3 oct and hex use __index__
        # so these tests are for __index__ in py3k
        self.assertEqual(oct(mock), '0o1')
        self.assertEqual(hex(mock), '0x1')
        # how to test __sizeof__ ?

    def test_magic_methods_and_spec(self):
        class Iterable(object):
            def __iter__(self):
                pass

        mock = Mock(spec=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)

        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])

        class NonIterable(object):
            pass
        mock = Mock(spec=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)

        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)

        mock = MagicMock(spec=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)

    def test_magic_methods_and_spec_set(self):
        class Iterable(object):
            def __iter__(self):
                pass

        mock = Mock(spec_set=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)

        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])

        class NonIterable(object):
            pass
        mock = Mock(spec_set=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)

        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)

        mock = MagicMock(spec_set=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)

    def test_setting_unsupported_magic_method(self):
        mock = MagicMock()
        def set_setattr():
            mock.__setattr__ = lambda self, name: None
        self.assertRaisesRegex(AttributeError,
            "Attempting to set unsupported magic method '__setattr__'.",
            set_setattr
        )

    def test_attributes_and_return_value(self):
        mock = MagicMock()
        attr = mock.foo
        def _get_type(obj):
            # the type of every mock (or magicmock) is a custom subclass
            # so the real type is the second in the mro
            return type(obj).__mro__[1]
        self.assertEqual(_get_type(attr), MagicMock)

        returned = mock()
        self.assertEqual(_get_type(returned), MagicMock)

    def test_magic_methods_are_magic_mocks(self):
        mock = MagicMock()
        self.assertIsInstance(mock.__getitem__, MagicMock)

        mock[1][2].__getitem__.return_value = 3
        self.assertEqual(mock[1][2][3], 3)

    def test_magic_method_reset_mock(self):
        mock = MagicMock()
        str(mock)
        self.assertTrue(mock.__str__.called)
        mock.reset_mock()
        self.assertFalse(mock.__str__.called)

    def test_dir(self):
        # overriding the default implementation
        for mock in Mock(), MagicMock():
            def _dir(self):
                return ['foo']
            mock.__dir__ = _dir
            self.assertEqual(dir(mock), ['foo'])

    @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        m = Mock()

        # XXXX should this be an expected failure instead?

        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)

    def test_magic_method_type(self):
        class Foo(MagicMock):
            pass

        foo = Foo()
        self.assertIsInstance(foo.__int__, Foo)

    def test_descriptor_from_class(self):
        m = MagicMock()
        type(m).__str__.return_value = 'foo'
        self.assertEqual(str(m), 'foo')

    def test_iterable_as_iter_return_value(self):
        m = MagicMock()
        m.__iter__.return_value = [1, 2, 3]
        self.assertEqual(list(m), [1, 2, 3])
        self.assertEqual(list(m), [1, 2, 3])

        m.__iter__.return_value = iter([4, 5, 6])
        self.assertEqual(list(m), [4, 5, 6])
        # a plain iterator is exhausted after the first pass
        self.assertEqual(list(m), [])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| agpl-3.0 |
benmoran56/FightstickDisplay | pyglet/gl/base.py | 2 | 13194 | from builtins import object
#!/usr/bin/python
# $Id:$
from pyglet import gl, compat_platform
from pyglet.gl import gl_info
from pyglet.gl import glu_info
class Config(object):
    '''Graphics configuration.

    A Config stores the preferences for OpenGL attributes such as the
    number of auxilliary buffers, size of the colour and depth buffers,
    double buffering, stencilling, multi- and super-sampling, and so on.

    Different platforms support a different set of attributes, so these
    are set with a string key and a value which is integer or boolean.

    :Ivariables:
        `double_buffer` : bool
            Specify the presence of a back-buffer for every color buffer.
        `stereo` : bool
            Specify the presence of separate left and right buffer sets.
        `buffer_size` : int
            Total bits per sample per color buffer.
        `aux_buffers` : int
            The number of auxilliary color buffers.
        `sample_buffers` : int
            The number of multisample buffers.
        `samples` : int
            The number of samples per pixel, or 0 if there are no multisample
            buffers.
        `red_size` : int
            Bits per sample per buffer devoted to the red component.
        `green_size` : int
            Bits per sample per buffer devoted to the green component.
        `blue_size` : int
            Bits per sample per buffer devoted to the blue component.
        `alpha_size` : int
            Bits per sample per buffer devoted to the alpha component.
        `depth_size` : int
            Bits per sample in the depth buffer.
        `stencil_size` : int
            Bits per sample in the stencil buffer.
        `accum_red_size` : int
            Bits per pixel devoted to the red component in the accumulation
            buffer.
        `accum_green_size` : int
            Bits per pixel devoted to the green component in the accumulation
            buffer.
        `accum_blue_size` : int
            Bits per pixel devoted to the blue component in the accumulation
            buffer.
        `accum_alpha_size` : int
            Bits per pixel devoted to the alpha component in the accumulation
            buffer.
    '''

    # All attribute names that may be supplied as keyword arguments; any
    # attribute not supplied is set to None (meaning "don't care").
    _attribute_names = [
        'double_buffer',
        'stereo',
        'buffer_size',
        'aux_buffers',
        'sample_buffers',
        'samples',
        'red_size',
        'green_size',
        'blue_size',
        'alpha_size',
        'depth_size',
        'stencil_size',
        'accum_red_size',
        'accum_green_size',
        'accum_blue_size',
        'accum_alpha_size',
        'major_version',
        'minor_version',
        'forward_compatible',
        'debug'
    ]

    # Requested GL context version / flags (None = no preference).
    major_version = None
    minor_version = None
    forward_compatible = None
    debug = None

    def __init__(self, **kwargs):
        '''Create a template config with the given attributes.

        Specify attributes as keyword arguments, for example::

            template = Config(double_buffer=True)

        '''
        for name in self._attribute_names:
            if name in kwargs:
                setattr(self, name, kwargs[name])
            else:
                setattr(self, name, None)

    def _requires_gl_3(self):
        # True when the requested attributes can only be satisfied by a
        # GL 3+ context creation path.
        if self.major_version is not None and self.major_version >= 3:
            return True
        if self.forward_compatible or self.debug:
            return True
        return False

    def get_gl_attributes(self):
        '''Return a list of attributes set on this config.

        :rtype: list of tuple (name, value)
        :return: All attributes, with unset attributes having a value of
            ``None``.
        '''
        return [(name, getattr(self, name)) for name in self._attribute_names]

    def match(self, canvas):
        '''Return a list of matching complete configs for the given canvas.

        :since: pyglet 1.2

        :Parameters:
            `canvas` : `Canvas`
                Display to host contexts created from the config.

        :rtype: list of `CanvasConfig`
        '''
        raise NotImplementedError('abstract')

    def create_context(self, share):
        '''Create a GL context that satisifies this configuration.

        :deprecated: Use `CanvasConfig.create_context`.

        :Parameters:
            `share` : `Context`
                If not None, a context with which to share objects with.

        :rtype: `Context`
        :return: The new context.
        '''
        raise gl.ConfigException(
            'This config cannot be used to create contexts.  '
            'Use Config.match to created a CanvasConfig')

    def is_complete(self):
        '''Determine if this config is complete and able to create a context.

        Configs created directly are not complete, they can only serve
        as templates for retrieving a supported config from the system.
        For example, `pyglet.window.Screen.get_matching_configs` returns
        complete configs.

        :deprecated: Use ``isinstance(config, CanvasConfig)``.

        :rtype: bool
        :return: True if the config is complete and can create a context.
        '''
        return isinstance(self, CanvasConfig)

    def __repr__(self):
        import pprint
        return '%s(%s)' % (self.__class__.__name__,
                           pprint.pformat(self.get_gl_attributes()))
class CanvasConfig(Config):
    '''OpenGL configuration for a particular canvas.

    Use `Config.match` to obtain an instance of this class.

    :since: pyglet 1.2

    :Ivariables:
        `canvas` : `Canvas`
            The canvas this config is valid on.

    '''
    def __init__(self, canvas, base_config):
        # Copy the context-version request from the template config.
        self.canvas = canvas

        self.major_version = base_config.major_version
        self.minor_version = base_config.minor_version
        self.forward_compatible = base_config.forward_compatible
        self.debug = base_config.debug

    def compatible(self, canvas):
        # Return True if contexts from this config can attach to `canvas`.
        raise NotImplementedError('abstract')

    def create_context(self, share):
        '''Create a GL context that satisifies this configuration.

        :Parameters:
            `share` : `Context`
                If not None, a context with which to share objects with.

        :rtype: `Context`
        :return: The new context.
        '''
        raise NotImplementedError('abstract')

    def is_complete(self):
        # Canvas configs are always complete (see Config.is_complete).
        return True
class ObjectSpace(object):
    """Bookkeeping shared by all contexts that share GL objects.

    Holds GL object names whose deletion must be deferred until a context
    belonging to this object space is next made current.
    """

    def __init__(self):
        # Textures and buffers scheduled for deletion the next time this
        # object space is active.
        self._doomed_textures, self._doomed_buffers = [], []
class Context(object):
    '''OpenGL context for drawing.

    Use `CanvasConfig.create_context` to create a context.

    :Ivariables:
        `object_space` : `ObjectSpace`
            An object which is shared between all contexts that share
            GL objects.

    '''
    #: Context share behaviour indicating that objects should not be
    #: shared with existing contexts.
    CONTEXT_SHARE_NONE = None

    #: Context share behaviour indicating that objects are shared with
    #: the most recently created context (the default).
    CONTEXT_SHARE_EXISTING = 1

    # Used for error checking, True if currently within a glBegin/End block.
    # Ignored if error checking is disabled.
    _gl_begin = False

    # gl_info.GLInfo instance, filled in on first set_current
    _info = None

    # List of (attr, check) for each driver/device-specific workaround that is
    # implemented.  The `attr` attribute on this context is set to the result
    # of evaluating `check(gl_info)` the first time this context is used.
    _workaround_checks = [
        # GDI Generic renderer on Windows does not implement
        # GL_UNPACK_ROW_LENGTH correctly.
        ('_workaround_unpack_row_length',
         lambda info: info.get_renderer() == 'GDI Generic'),

        # Reportedly segfaults in text_input.py example with
        #   "ATI Radeon X1600 OpenGL Engine"
        # glGenBuffers not exported by
        #   "ATI Radeon X1270 x86/MMX/3DNow!/SSE2"
        #   "RADEON XPRESS 200M Series x86/MMX/3DNow!/SSE2"
        # glGenBuffers not exported by
        #   "Intel 965/963 Graphics Media Accelerator"
        ('_workaround_vbo',
         lambda info: (info.get_renderer().startswith('ATI Radeon X')
                       or info.get_renderer().startswith('RADEON XPRESS 200M')
                       or info.get_renderer() ==
                       'Intel 965/963 Graphics Media Accelerator')),

        # Some ATI cards on OS X start drawing from a VBO before it's written
        # to.  In these cases pyglet needs to call glFinish() to flush the
        # pipeline after updating a buffer but before rendering.
        ('_workaround_vbo_finish',
         lambda info: ('ATI' in info.get_renderer() and
                       info.have_version(1, 5) and
                       compat_platform == 'darwin')),
    ]

    def __init__(self, config, context_share=None):
        self.config = config
        self.context_share = context_share
        self.canvas = None

        # Share the GL object space with the shared context, otherwise
        # start a fresh one.
        if context_share:
            self.object_space = context_share.object_space
        else:
            self.object_space = ObjectSpace()

    def __repr__(self):
        return '%s()' % self.__class__.__name__

    def attach(self, canvas):
        # Bind this context to `canvas`, detaching from any previous canvas.
        if self.canvas is not None:
            self.detach()
        if not self.config.compatible(canvas):
            raise RuntimeError('Cannot attach %r to %r' % (canvas, self))
        self.canvas = canvas

    def detach(self):
        self.canvas = None

    def set_current(self):
        '''Make this context current and flush deferred GL object deletions.'''
        if not self.canvas:
            raise RuntimeError('Canvas has not been attached')

        # XXX not per-thread
        gl.current_context = self

        # XXX
        gl_info.set_active_context()
        glu_info.set_active_context()

        # Implement workarounds
        if not self._info:
            self._info = gl_info.GLInfo()
            self._info.set_active_context()
            for attr, check in self._workaround_checks:
                setattr(self, attr, check(self._info))

        # Release textures and buffers on this context scheduled for deletion.
        # Note that the garbage collector may introduce a race condition,
        # so operate on a copy of the textures/buffers and remove the deleted
        # items using list slicing (which is an atomic operation)
        if self.object_space._doomed_textures:
            textures = self.object_space._doomed_textures[:]
            textures = (gl.GLuint * len(textures))(*textures)
            gl.glDeleteTextures(len(textures), textures)
            self.object_space._doomed_textures[0:len(textures)] = []
        if self.object_space._doomed_buffers:
            buffers = self.object_space._doomed_buffers[:]
            buffers = (gl.GLuint * len(buffers))(*buffers)
            gl.glDeleteBuffers(len(buffers), buffers)
            self.object_space._doomed_buffers[0:len(buffers)] = []

    def destroy(self):
        '''Release the context.

        The context will not be useable after being destroyed.  Each platform
        has its own convention for releasing the context and the buffer(s)
        that depend on it in the correct order; this should never be called
        by an application.
        '''
        self.detach()

        if gl.current_context is self:
            gl.current_context = None
            gl_info.remove_active_context()

            # Switch back to shadow context.
            if gl._shadow_window is not None:
                gl._shadow_window.switch_to()

    def delete_texture(self, texture_id):
        '''Safely delete a texture belonging to this context.

        Usually, the texture is released immediately using
        ``glDeleteTextures``, however if another context that does not share
        this context's object space is currently active, the deletion will
        be deferred until an appropriate context is activated.

        :Parameters:
            `texture_id` : int
                The OpenGL name of the texture to delete.

        '''
        if self.object_space is gl.current_context.object_space:
            id = gl.GLuint(texture_id)
            gl.glDeleteTextures(1, id)
        else:
            self.object_space._doomed_textures.append(texture_id)

    def delete_buffer(self, buffer_id):
        '''Safely delete a buffer object belonging to this context.

        This method behaves similarly to `delete_texture`, though for
        ``glDeleteBuffers`` instead of ``glDeleteTextures``.

        :Parameters:
            `buffer_id` : int
                The OpenGL name of the buffer to delete.

        :since: pyglet 1.1
        '''
        # NOTE(review): the trailing `and False` makes the immediate-delete
        # branch unreachable, so every buffer deletion is deferred via the
        # object space.  This looks like a debugging leftover or a deliberate
        # workaround -- confirm intent before removing it.
        if self.object_space is gl.current_context.object_space and False:
            id = gl.GLuint(buffer_id)
            gl.glDeleteBuffers(1, id)
        else:
            self.object_space._doomed_buffers.append(buffer_id)

    def get_info(self):
        '''Get the OpenGL information for this context.

        :since: pyglet 1.2

        :rtype: `GLInfo`
        '''
        return self._info
| gpl-3.0 |
bitspill/electrum-doged | lib/bitcoin.py | 4 | 25273 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import base64
import re
import sys
import hmac
import version
from util import print_error, InvalidPassword
import ecdsa
import aes
################################## transactions

# Fee/dust policy constants, in the smallest coin unit (values as configured
# for this fork).
DUST_THRESHOLD = 0
DUST_SOFT_LIMIT = 1000000
MIN_RELAY_TX_FEE = 1000000
RECOMMENDED_FEE = 1000000
COINBASE_MATURITY = 100

# AES encryption
# Encrypt to / decrypt from base64 text using the bundled aes module.
EncodeAES = lambda secret, s: base64.b64encode(aes.encryptData(secret,s))
DecodeAES = lambda secret, e: aes.decryptData(secret, base64.b64decode(e))
def strip_PKCS7_padding(s):
    """Return *s* with its trailing PKCS7 padding removed.

    Raises ValueError when the length or the padding bytes do not form a
    valid PKCS7 layout for a 16-byte block cipher.
    """
    if not s or len(s) % 16:
        raise ValueError("String of len %d can't be PCKS7-padded" % len(s))
    pad_len = ord(s[-1])
    if pad_len > 16:
        raise ValueError("String ending with %r can't be PCKS7-padded" % s[-1])
    if s[-pad_len:] != chr(pad_len) * pad_len:
        raise ValueError("Invalid PKCS7 padding")
    return s[:-pad_len]
# backport padding fix to AES module
# Replace the aes module's padding stripper with the stricter version above.
aes.strip_PKCS7_padding = strip_PKCS7_padding
def aes_encrypt_with_iv(key, iv, data):
    """CBC-encrypt *data* with *key*/*iv* via the bundled aes module.

    key/iv are byte strings (Python 2 str); returns the raw ciphertext
    string.  The plaintext is PKCS7-padded before encryption.
    """
    mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
    # The aes module operates on lists of ints (Python 2 map returns a list).
    key = map(ord, key)
    iv = map(ord, iv)
    data = aes.append_PKCS7_padding(data)
    keysize = len(key)
    assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
    moo = aes.AESModeOfOperation()
    (mode, length, ciph) = moo.encrypt(data, mode, key, keysize, iv)
    return ''.join(map(chr, ciph))
def aes_decrypt_with_iv(key, iv, data):
    """CBC-decrypt *data* with *key*/*iv*; inverse of aes_encrypt_with_iv.

    Strips and validates the PKCS7 padding (raises ValueError when invalid).
    """
    mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
    key = map(ord, key)
    iv = map(ord, iv)
    keysize = len(key)
    assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
    data = map(ord, data)
    moo = aes.AESModeOfOperation()
    decr = moo.decrypt(data, None, mode, key, keysize, iv)
    decr = strip_PKCS7_padding(decr)
    return decr
def pw_encode(s, password):
    """Encrypt unicode string *s* under *password*; no-op when password is falsy."""
    if password:
        secret = Hash(password)
        return EncodeAES(secret, s.encode("utf8"))
    else:
        return s
def pw_decode(s, password):
    """Inverse of pw_encode; raises InvalidPassword on any decryption failure.

    NOTE(review): only `password is None` skips decryption here, whereas
    pw_encode also skips for '' (falsy) -- confirm the asymmetry is intended.
    """
    if password is not None:
        secret = Hash(password)
        try:
            d = DecodeAES(secret, s).decode("utf8")
        except Exception:
            # Any failure (bad base64, bad padding, bad utf8) means the
            # password was wrong.
            raise InvalidPassword()
        return d
    else:
        return s
def rev_hex(s):
    """Reverse the byte order of hex string *s* (Python 2 str hex codecs)."""
    return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
    """Serialize int *i* as little-endian hex, zero-padded to *length* bytes."""
    s = hex(i)[2:].rstrip('L')  # strip the Python 2 long suffix
    s = "0"*(2*length - len(s)) + s
    return rev_hex(s)
def var_int(i):
    """Encode *i* as a Bitcoin protocol variable-length integer (hex)."""
    # https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    if i<0xfd:
        return int_to_hex(i)
    elif i<=0xffff:
        return "fd"+int_to_hex(i,2)
    elif i<=0xffffffff:
        return "fe"+int_to_hex(i,4)
    else:
        return "ff"+int_to_hex(i,8)
def op_push(i):
    """Return the hex script prefix that pushes *i* data bytes.

    i < 0x4c   -> single length byte
    i <= 0xff  -> OP_PUSHDATA1 (0x4c) + 1-byte length
    i <= 0xffff-> OP_PUSHDATA2 (0x4d) + 2-byte little-endian length
    otherwise  -> OP_PUSHDATA4 (0x4e) + 4-byte little-endian length
    """
    if i < 0x4c:
        return int_to_hex(i)
    elif i <= 0xff:
        # was `i < 0xff`: a push of exactly 0xff bytes was needlessly
        # encoded with OP_PUSHDATA2; OP_PUSHDATA1 covers lengths up to 0xff.
        return '4c' + int_to_hex(i)
    elif i <= 0xffff:
        # was `i < 0xffff`: same off-by-one at the OP_PUSHDATA2 boundary.
        return '4d' + int_to_hex(i, 2)
    else:
        return '4e' + int_to_hex(i, 4)
def sha256(x):
    """Single SHA-256 digest of *x* (raw bytes in, raw bytes out)."""
    digest = hashlib.sha256(x)
    return digest.digest()
def Hash(x):
    """Double SHA-256 (Bitcoin's standard hash); unicode is UTF-8 encoded first."""
    if type(x) is unicode: x=x.encode('utf-8')
    return sha256(sha256(x))
# Hashes are displayed/transported byte-reversed relative to their raw form.
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
# HMAC-SHA512 of message y under key x (used for seed derivation below).
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """Return True if *x* is a new-style seed (HMAC of the normalized
    mnemonic starts with *prefix*)."""
    import mnemonic
    x = mnemonic.prepare_seed(x)
    s = hmac_sha_512("Seed version", x.encode('utf8')).encode('hex')
    return s.startswith(prefix)
def is_old_seed(seed):
    """Return True if *seed* looks like a legacy Electrum seed.

    Accepts either raw hex of 16/32 bytes, or 12/24 words that decode with
    the old mnemonic wordlist.
    """
    import old_mnemonic
    words = seed.strip().split()
    try:
        old_mnemonic.mn_decode(words)
        uses_electrum_words = True
    except Exception:
        uses_electrum_words = False

    try:
        seed.decode('hex')  # Python 2 str codec
        is_hex = (len(seed) == 32 or len(seed) == 64)
    except Exception:
        is_hex = False

    return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
# pywallet openssl private key implementation
def i2d_ECPrivateKey(pkey, compressed=False):
    """Serialize an ecdsa private key to OpenSSL DER ("i2d") format.

    The hex templates embed the curve parameters from the module globals
    _p, _r, _Gx, _Gy.  # NOTE(review): those globals are defined elsewhere
    # in this module; confirm they are initialized before first use.
    """
    if compressed:
        key = '3081d30201010420' + \
              '%064x' % pkey.secret + \
              'a081a53081a2020101302c06072a8648ce3d0101022100' + \
              '%064x' % _p + \
              '3006040100040107042102' + \
              '%064x' % _Gx + \
              '022100' + \
              '%064x' % _r + \
              '020101a124032200'
    else:
        key = '308201130201010420' + \
              '%064x' % pkey.secret + \
              'a081a53081a2020101302c06072a8648ce3d0101022100' + \
              '%064x' % _p + \
              '3006040100040107044104' + \
              '%064x' % _Gx + \
              '%064x' % _Gy + \
              '022100' + \
              '%064x' % _r + \
              '020101a144034200'
    return key.decode('hex') + i2o_ECPublicKey(pkey.pubkey, compressed)
def i2o_ECPublicKey(pubkey, compressed=False):
    """Serialize an ecdsa public-key point to raw SEC bytes.

    Uncompressed keys are 65 bytes: 0x04 + 32-byte X + 32-byte Y.
    Compressed keys are 33 bytes: a parity prefix (0x02 if Y is even,
    0x03 if odd) followed by the 32-byte X coordinate.
    """
    x = pubkey.point.x()
    y = pubkey.point.y()
    if compressed:
        prefix = '03' if (y & 1) else '02'
        key = prefix + '%064x' % x
    else:
        key = '04' + '%064x' % x + '%064x' % y
    return key.decode('hex')
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
    """RIPEMD160(SHA256(public_key)) -- Bitcoin's HASH160 of a serialized key."""
    try:
        md = hashlib.new('ripemd160')
        md.update(sha256(public_key))
        return md.digest()
    except Exception:
        # OpenSSL builds without ripemd160 support: fall back to pure Python.
        import ripemd
        md = ripemd.new(sha256(public_key))
        return md.digest()
def public_key_to_bc_address(public_key):
    """Derive the base58 address for a serialized (SEC) public key."""
    return hash_160_to_bc_address(hash_160(public_key))
def hash_160_to_bc_address(h160, addrtype = 30):
    """Base58Check-encode a 20-byte hash160 into an address.

    The payload is version byte + hash160; the checksum is the first four
    bytes of the double-SHA256 of the payload.
    """
    payload = chr(addrtype) + h160
    checksum = Hash(payload)[0:4]
    return base_encode(payload + checksum, base=58)
def bc_address_to_hash_160(addr):
    """Decode a base58 address into (version byte, 20-byte hash160).

    Note: the checksum is NOT verified here; see is_address() for validation.
    """
    # renamed local: the original shadowed the builtin `bytes`
    raw = base_decode(addr, 25, base=58)
    return ord(raw[0]), raw[1:21]
# Alphabets for base58 (Bitcoin addresses) and base43 (compact tx encoding).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
    """Encode the byte string v in base58 (or base43).

    Leading zero bytes in v become leading pad characters (chars[0], i.e. '1'
    for base58), matching Bitcoin's convention.

    Raises ValueError for an unsupported base.
    """
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        # previously fell through with `chars` unbound, raising a NameError
        raise ValueError('unsupported base: %r' % base)
    long_value = 0  # was 0L; plain ints auto-promote in Py2 and 0L breaks Py3
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)
    result = ''
    while long_value >= base:
        div, mod = divmod(long_value, base)
        result = chars[mod] + result
        long_value = div
    result = chars[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0': nPad += 1
        else: break
    return (chars[0]*nPad) + result
def base_decode(v, length, base):
    """Decode the base58/base43 string v back into a byte string.

    Leading pad characters become leading zero bytes. If `length` is given and
    the decoded result has a different length, None is returned.

    Raises ValueError for an unsupported base.
    """
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        # previously fell through with `chars` unbound, raising a NameError
        raise ValueError('unsupported base: %r' % base)
    long_value = 0  # was 0L (see base_encode)
    for (i, c) in enumerate(v[::-1]):
        long_value += chars.find(c) * (base**i)
    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    nPad = 0
    for c in v:
        if c == chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None
    return result
def EncodeBase58Check(vchIn):
    """Base58Check: append the 4-byte double-SHA256 checksum, then base58-encode."""
    checksum = Hash(vchIn)[0:4]  # renamed local (was shadowing builtin `hash`)
    return base_encode(vchIn + checksum, base=58)
def DecodeBase58Check(psz):
    """Base58Check-decode psz and verify its 4-byte checksum.

    Returns the payload (without checksum), or None if the checksum is bad.
    """
    decoded = base_decode(psz, None, base=58)
    payload, csum = decoded[0:-4], decoded[-4:]
    if Hash(payload)[0:4] != csum:
        return None
    return payload
def PrivKeyToSecret(privkey):
    """Extract the raw 32-byte secret from an i2d DER private key (offset 9)."""
    return privkey[9:41]
def SecretToASecret(secret, compressed=False, addrtype=30):
    """Encode a raw secret as WIF: version byte (addrtype+128), the secret,
    and a trailing 0x01 flag when the corresponding pubkey is compressed."""
    payload = chr((addrtype + 128) & 255) + secret
    if compressed:
        payload += '\01'
    return EncodeBase58Check(payload)
def ASecretToSecret(key, addrtype=30):
    """Decode a WIF key; return the raw secret (plus any compression flag),
    or False when the checksum or version byte is wrong."""
    vch = DecodeBase58Check(key)
    if not (vch and vch[0] == chr((addrtype + 128) & 255)):
        return False
    return vch[1:]
def regenerate_key(sec):
    """Rebuild an EC_KEY from a WIF secret; returns False if the WIF is invalid."""
    secret = ASecretToSecret(sec)
    if not secret:
        return False
    # Keep only the 32 secret bytes (drops a trailing compression flag).
    return EC_KEY(secret[0:32])
def GetPubKey(pubkey, compressed=False):
    # SEC serialization of a public key (see i2o_ECPublicKey).
    return i2o_ECPublicKey(pubkey, compressed)
def GetPrivKey(pkey, compressed=False):
    # OpenSSL DER serialization of a private key (see i2d_ECPrivateKey).
    return i2d_ECPrivateKey(pkey, compressed)
def GetSecret(pkey):
    # Raw 32-byte big-endian secret scalar (Python 2 'hex' codec).
    return ('%064x' % pkey.secret).decode('hex')
def is_compressed(sec):
    """True if the WIF key carries the trailing compression flag (33-byte payload)."""
    return len(ASecretToSecret(sec)) == 33
def public_key_from_private_key(sec):
    """Return the hex-encoded public key for a WIF private key, preserving the
    compressed/uncompressed form implied by the WIF encoding."""
    # rebuild public key from private key, compressed or uncompressed
    pkey = regenerate_key(sec)
    assert pkey
    compressed = is_compressed(sec)
    public_key = GetPubKey(pkey.pubkey, compressed)
    return public_key.encode('hex')
def address_from_private_key(sec):
    """Derive the base58 address corresponding to a WIF private key."""
    pubkey_hex = public_key_from_private_key(sec)
    return public_key_to_bc_address(pubkey_hex.decode('hex'))
def is_valid(addr):
    # Backward-compatible alias for is_address().
    return is_address(addr)
def is_address(addr):
    """True if addr is a well-formed base58check address: base58 alphabet only,
    at least 26 chars, and the checksum round-trips."""
    if not re.match('[1-9A-HJ-NP-Za-km-z]{26,}\\Z', addr):
        return False
    try:
        addrtype, h = bc_address_to_hash_160(addr)
    except Exception:
        return False
    # Re-encoding must reproduce the input exactly (validates the checksum).
    return hash_160_to_bc_address(h, addrtype) == addr
def is_private_key(key):
    """True if key decodes as a valid WIF private key."""
    try:
        return ASecretToSecret(key) is not False
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit escape
        return False
########### end pywallet functions #######################
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
    """Build the Bitcoin-style signed-message preimage for this network:
    magic prefix + varint(message length) + message."""
    varint = var_int(len(message))
    # var_int returns hex; convert each hex pair to a raw byte.
    encoded_varint = "".join([chr(int(varint[i:i+2], 16)) for i in xrange(0, len(varint), 2)])
    return "\x19DogecoinDark Signed Message:\n" + encoded_varint + message
def verify_message(address, signature, message):
    """Return True iff signature is a valid signed-message signature made by
    the key behind `address` (errors are logged, never raised)."""
    try:
        EC_KEY.verify_message(address, signature, message)
        return True
    except Exception as e:
        print_error("Verification error: {0}".format(e))
        return False
def encrypt_message(message, pubkey):
    # ECIES-encrypt `message` to the hex-encoded SEC public key `pubkey`.
    return EC_KEY.encrypt_message(message, pubkey.decode('hex'))
def chunks(l, n):
    """Split sequence l into consecutive pieces of length n (last may be shorter)."""
    return [l[start:start + n] for start in xrange(0, len(l), n)]
def ECC_YfromX(x, curved=curve_secp256k1, odd=True):
    """Solve y for a given x on the curve, choosing the root with the requested
    parity. If x is not on the curve, x+1, x+2, ... (up to 127) are tried and
    [y, offset] is returned for the first valid point.

    Raises Exception if no point is found within 128 offsets.
    """
    _p = curved.p()
    _a = curved.a()
    _b = curved.b()
    for offset in range(128):
        Mx = x + offset
        # Right-hand side of the Weierstrass equation y^2 = x^3 + a*x + b (mod p).
        # The original computed `_a * Mx**2 + _b % _p`, which is wrong for any
        # curve with a != 0 (harmless for secp256k1, where a == 0).
        My2 = (pow(Mx, 3, _p) + _a * Mx + _b) % _p
        # Modular square root via exponentiation; valid because p % 4 == 3.
        My = pow(My2, (_p + 1) // 4, _p)
        if curved.contains_point(Mx, My):
            if odd == bool(My & 1):
                return [My, offset]
            return [_p - My, offset]
    raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
    """Return -P (same x, negated y) on P's curve."""
    return Point(P.curve(), P.x(), -P.y(), P.order())
def point_to_ser(P, comp=True ):
    """Serialize an EC point to SEC bytes: 02/03 + x if compressed (prefix
    encodes y's parity), else 04 + x + y."""
    if comp:
        return ( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) ).decode('hex')
    return ( '04'+('%064x'%P.x())+('%064x'%P.y()) ).decode('hex')
def ser_to_point(Aser):
    """Deserialize SEC bytes back into an ecdsa Point on secp256k1.

    04-prefixed input carries both coordinates; 02/03 prefixes carry only x,
    and y is recovered with the parity implied by the prefix.
    """
    curve = curve_secp256k1
    generator = generator_secp256k1
    _r = generator.order()
    assert Aser[0] in ['\x02','\x03','\x04']
    if Aser[0] == '\x04':
        return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
    Mx = string_to_number(Aser[1:])
    return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0]=='\x03')[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
    """VerifyingKey extended with public-key recovery from a signature."""
    @classmethod
    def from_signature(klass, sig, recid, h, curve):
        """ See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
        from ecdsa import util, numbertheory
        import msqr
        curveFp = curve.curve
        G = curve.generator
        order = G.order()
        # extract r,s from signature
        r, s = util.sigdecode_string(sig, order)
        # 1.1
        x = r + (recid/2) * order
        # 1.3
        alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
        beta = msqr.modular_sqrt(alpha, curveFp.p())
        # choose the square root whose parity matches the recovery id
        y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
        # 1.4 the constructor checks that nR is at infinity
        R = Point(curveFp, x, y, order)
        # 1.5 compute e from message:
        e = string_to_number(h)
        minus_e = -e % order
        # 1.6 compute Q = r^-1 (sR - eG)
        inv_r = numbertheory.inverse_mod(r,order)
        Q = inv_r * ( s * R + minus_e * G )
        return klass.from_public_point( Q, curve )
class EC_KEY(object):
    """An secp256k1 keypair built from a 32-byte secret, with Bitcoin-style
    message signing/verification and ECIES encryption/decryption."""
    def __init__( self, k ):
        # k: raw 32-byte secret; derive the public point k*G and keep all three.
        secret = string_to_number(k)
        self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
        self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
        self.secret = secret
    def get_public_key(self, compressed=True):
        # Hex-encoded SEC serialization of the public point.
        return point_to_ser(self.pubkey.point, compressed).encode('hex')
    def sign_message(self, message, compressed, address):
        # Deterministic (RFC6979-style) signature over the magic-prefixed message.
        private_key = ecdsa.SigningKey.from_secret_exponent( self.secret, curve = SECP256k1 )
        public_key = private_key.get_verifying_key()
        signature = private_key.sign_digest_deterministic( Hash( msg_magic(message) ), hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string )
        assert public_key.verify_digest( signature, Hash( msg_magic(message) ), sigdecode = ecdsa.util.sigdecode_string)
        # Try all four recovery ids until verify_message accepts one.
        for i in range(4):
            sig = base64.b64encode( chr(27 + i + (4 if compressed else 0)) + signature )
            try:
                self.verify_message( address, sig, message)
                return sig
            except Exception:
                continue
        else:
            raise Exception("error: cannot sign message")
    @classmethod
    def verify_message(self, address, signature, message):
        """Raise if `signature` is not a valid signed-message signature by `address`."""
        sig = base64.b64decode(signature)
        if len(sig) != 65: raise Exception("Wrong encoding")
        # First byte encodes the recovery id and the compression flag.
        nV = ord(sig[0])
        if nV < 27 or nV >= 35:
            raise Exception("Bad encoding")
        if nV >= 31:
            compressed = True
            nV -= 4
        else:
            compressed = False
        recid = nV - 27
        h = Hash( msg_magic(message) )
        public_key = MyVerifyingKey.from_signature( sig[1:], recid, h, curve = SECP256k1 )
        # check public key
        public_key.verify_digest( sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
        # check that we get the original signing address
        addr = public_key_to_bc_address( point_to_ser(public_key.pubkey.point, compressed) )
        if address != addr:
            raise Exception("Bad signature")
    # ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
    @classmethod
    def encrypt_message(self, message, pubkey):
        """ECIES-encrypt `message` to the SEC-serialized public key `pubkey`."""
        pk = ser_to_point(pubkey)
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
            raise Exception('invalid pubkey')
        # Fresh ephemeral key; ECDH shared secret = recipient point * ephemeral secret.
        ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
        ephemeral = EC_KEY(ephemeral_exponent)
        ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
        # SHA512 of the shared point yields IV, AES key and MAC key.
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        ciphertext = aes_encrypt_with_iv(key_e, iv, message)
        ephemeral_pubkey = ephemeral.get_public_key(compressed=True).decode('hex')
        encrypted = 'BIE1' + ephemeral_pubkey + ciphertext
        mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
        return base64.b64encode(encrypted + mac)
    def decrypt_message(self, encrypted):
        """Reverse of encrypt_message: verify magic and MAC, then AES-decrypt."""
        encrypted = base64.b64decode(encrypted)
        # Minimum: 4 magic + 33 pubkey + 16 ciphertext + 32 mac.
        if len(encrypted) < 85:
            raise Exception('invalid ciphertext: length')
        magic = encrypted[:4]
        ephemeral_pubkey = encrypted[4:37]
        ciphertext = encrypted[37:-32]
        mac = encrypted[-32:]
        if magic != 'BIE1':
            raise Exception('invalid ciphertext: invalid magic bytes')
        try:
            ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
        except AssertionError, e:
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        # Recompute the ECDH secret with our private key and re-derive iv/keys.
        ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
            raise Exception('invalid ciphertext: invalid mac')
        return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
# n random bits formatted as a hex string (note: %032x pads, it does not truncate).
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
# High bit of a BIP32 child index marks hardened ("prime") derivation.
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
    """Return (uncompressed 64-byte, compressed SEC) public keys for a raw secret."""
    # public key
    private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
    public_key = private_key.get_verifying_key()
    K = public_key.to_string()
    K_compressed = GetPubKey(public_key.pubkey,True)
    return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
    # Serialize n as 4 big-endian bytes and delegate to _CKD_priv.
    is_prime = n & BIP32_PRIME
    return _CKD_priv(k, c, rev_hex(int_to_hex(n,4)).decode('hex'), is_prime)
def _CKD_priv(k, c, s, is_prime):
    """Core BIP32 private child derivation: I = HMAC-SHA512(c, data) where
    data is 0x00||k||s for hardened children, serP(K)||s otherwise."""
    import hmac
    from ecdsa.util import string_to_number, number_to_string
    order = generator_secp256k1.order()
    keypair = EC_KEY(k)
    cK = GetPubKey(keypair.pubkey,True)
    data = chr(0) + k + s if is_prime else cK + s
    I = hmac.new(c, data, hashlib.sha512).digest()
    # child key = (IL + parent key) mod n; child chain code = IR
    k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
    c_n = I[32:]
    return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
    """Derive the n-th non-hardened child of compressed pubkey cK / chain code c."""
    if n & BIP32_PRIME:
        # Was a bare `raise` with no active exception, which itself errors out
        # (TypeError/RuntimeError) instead of reporting the real problem.
        raise Exception("CKD_pub: cannot derive a hardened child from a public key")
    return _CKD_pub(cK, c, rev_hex(int_to_hex(n, 4)).decode('hex'))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
    """Core BIP32 public child derivation: I = HMAC-SHA512(c, serP(K)||s);
    child point = IL*G + K, child chain code = IR."""
    import hmac
    from ecdsa.util import string_to_number, number_to_string
    order = generator_secp256k1.order()
    I = hmac.new(c, cK + s, hashlib.sha512).digest()
    curve = SECP256k1
    pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
    public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
    c_n = I[32:]
    cK_n = GetPubKey(public_key.pubkey,True)
    return cK_n, c_n
# BIP32 extended-key version bytes (hex) for mainnet/testnet, in standard and
# "alternate" variants; the *_HEADERS tuples are ordered (public, private).
BITCOIN_HEADER_PRIV = "0488ade4"
BITCOIN_HEADER_PUB = "0488b21e"
TESTNET_HEADER_PRIV = "04358394"
TESTNET_HEADER_PUB = "043587cf"
BITCOIN_HEADERS = (BITCOIN_HEADER_PUB, BITCOIN_HEADER_PRIV)
TESTNET_HEADERS = (TESTNET_HEADER_PUB, TESTNET_HEADER_PRIV)
BITCOIN_HEADER_ALT_PRIV = "019d9cfe"
BITCOIN_HEADER_ALT_PUB = "019da462"
TESTNET_HEADER_ALT_PRIV = "0436ef7d"
TESTNET_HEADER_ALT_PUB = "0436f6e1"
BITCOIN_HEADERS_ALT = (BITCOIN_HEADER_ALT_PUB, BITCOIN_HEADER_ALT_PRIV)
TESTNET_HEADERS_ALT = (TESTNET_HEADER_ALT_PUB, TESTNET_HEADER_ALT_PRIV)
def _get_headers(testnet):
    """Returns the correct headers for either testnet or bitcoin, in the form
    of a 2-tuple, like (public, private)."""
    return TESTNET_HEADERS if testnet else BITCOIN_HEADERS
def deserialize_xkey(xkey):
    """Parse a base58check extended key into its BIP32 fields.

    Returns (depth, fingerprint, child_number, chain_code, K_or_k), where
    K_or_k is the 33-byte compressed public key, or the 32-byte private key
    (the leading 0x00 pad byte of private keys is stripped).
    """
    xkey = DecodeBase58Check(xkey)
    assert len(xkey) == 78
    xkey_header = xkey[0:4].encode('hex')
    # Determine if the key is a bitcoin key or a testnet key.
    if xkey_header in TESTNET_HEADERS:
        head = TESTNET_HEADER_PRIV
    elif xkey_header in BITCOIN_HEADERS:
        head = BITCOIN_HEADER_PRIV
    elif xkey_header in TESTNET_HEADERS_ALT:
        head = TESTNET_HEADER_ALT_PRIV
    elif xkey_header in BITCOIN_HEADERS_ALT:
        head = BITCOIN_HEADER_ALT_PRIV
    else:
        raise Exception("Unknown xkey header: '%s'" % xkey_header)
    depth = ord(xkey[4])
    fingerprint = xkey[5:9]
    child_number = xkey[9:13]
    c = xkey[13:13+32]
    # Private keys are padded with one 0x00 byte before the 32 key bytes.
    if xkey[0:4].encode('hex') == head:
        K_or_k = xkey[13+33:]
    else:
        K_or_k = xkey[13+32:]
    return depth, fingerprint, child_number, c, K_or_k
def get_xkey_name(xkey, testnet=False):
    """Return a short label for an extended key: '' for the root, or the child
    index (with a trailing ' for hardened) for depth-1 keys.

    Raises for keys deeper than one level. (`testnet` is accepted for
    interface compatibility but unused here.)
    """
    depth, fingerprint, child_number, c, K = deserialize_xkey(xkey)
    n = int(child_number.encode('hex'), 16)
    if n & BIP32_PRIME:
        child_id = "%d'" % (n - BIP32_PRIME)
    else:
        child_id = "%d" % n
    if depth == 0:
        return ''
    elif depth == 1:
        return child_id
    else:
        # was `raise BaseException(...)`, which escapes `except Exception` handlers
        raise Exception("xpub depth error")
def xpub_from_xprv(xprv, testnet=False):
    """Compute the extended public key corresponding to an extended private key,
    keeping depth/fingerprint/child-number/chain-code unchanged."""
    depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
    K, cK = get_pubkeys_from_secret(k)
    header_pub, _ = _get_headers(testnet)
    xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xpub)
def bip32_root(seed, testnet=False):
    """Derive the BIP32 master (xprv, xpub) pair from a binary seed via
    I = HMAC-SHA512("Bitcoin seed", seed); key = IL, chain code = IR."""
    import hmac
    header_pub, header_priv = _get_headers(testnet)
    I = hmac.new("Bitcoin seed", seed, hashlib.sha512).digest()
    master_k = I[0:32]
    master_c = I[32:]
    K, cK = get_pubkeys_from_secret(master_k)
    # Root keys have depth 0, zero fingerprint and zero child number.
    xprv = (header_priv + "00" + "00000000" + "00000000").decode("hex") + master_c + chr(0) + master_k
    xpub = (header_pub + "00" + "00000000" + "00000000").decode("hex") + master_c + cK
    return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def bip32_private_derivation(xprv, branch, sequence, testnet=False):
    """Derive (xprv, xpub) at derivation path `sequence`, which must extend
    `branch` (e.g. branch "m/", sequence "m/0'/1"). Path components ending in
    ' (apostrophe) are hardened."""
    assert sequence.startswith(branch)
    if branch == sequence:
        return xprv, xpub_from_xprv(xprv, testnet)
    header_pub, header_priv = _get_headers(testnet)
    depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
        parent_k = k
        k, c = CKD_priv(k, c, i)
        depth += 1
    # Fingerprint is the first 4 bytes of HASH160 of the parent's compressed pubkey.
    _, parent_cK = get_pubkeys_from_secret(parent_k)
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = ("%08X"%i).decode('hex')
    K, cK = get_pubkeys_from_secret(k)
    xprv = header_priv.decode('hex') + chr(depth) + fingerprint + child_number + c + chr(0) + k
    xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def bip32_public_derivation(xpub, branch, sequence, testnet=False):
    """Derive a child xpub at path `sequence` (which must extend `branch`)
    using public-only CKD; hardened components are not supported here."""
    header_pub, _ = _get_headers(testnet)
    depth, fingerprint, child_number, c, cK = deserialize_xkey(xpub)
    assert sequence.startswith(branch)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        i = int(n)
        parent_cK = cK
        cK, c = CKD_pub(cK, c, i)
        depth += 1
    # Fingerprint of the last parent, as in bip32_private_derivation.
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = ("%08X"%i).decode('hex')
    xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xpub)
def bip32_private_key(sequence, k, chain):
    """Walk the integer index path `sequence` from (k, chain) and return the
    final private key as a compressed WIF string."""
    for index in sequence:
        k, chain = CKD_priv(k, chain, index)
    return SecretToASecret(k, True)
| gpl-3.0 |
arkmaxim/grpc | src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/third_party/pep8/lib2to3/lib2to3/pytree.py | 325 | 29039 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from StringIO import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
# Lazily-filled cache mapping symbol numbers (>= 256) to their grammar names.
_type_reprs = {}
def type_repr(type_num):
    """Return the grammar symbol name for type_num, or type_num itself
    if it has no name (e.g. token numbers)."""
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        for name, val in python_symbols.__dict__.items():
            if type(val) == int: _type_reprs[val] = name
    return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """
    Abstract base class for Node and Leaf.
    This provides some default functionality and boilerplate using the
    template pattern.
    A node may be a subnode of at most one parent.
    """
    # Default values for instance variables
    type = None # int: token number (< 256) or symbol number (>= 256)
    parent = None # Parent node pointer, or None
    children = () # Tuple of subnodes
    was_changed = False  # set by changed() when the subtree is modified
    was_checked = False
    def __new__(cls, *args, **kwds):
        """Constructor that prevents Base from being instantiated."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)
    def __eq__(self, other):
        """
        Compare two nodes for equality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)
    __hash__ = None # For Py3 compatibility.
    def __ne__(self, other):
        """
        Compare two nodes for inequality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)
    def _eq(self, other):
        """
        Compare two nodes for equality.
        This is called by __eq__ and __ne__. It is only called if the two nodes
        have the same type. This must be implemented by the concrete subclass.
        Nodes should be considered equal if they have the same structure,
        ignoring the prefix string and other context information.
        """
        raise NotImplementedError
    def clone(self):
        """
        Return a cloned (deep) copy of self.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def post_order(self):
        """
        Return a post-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def pre_order(self):
        """
        Return a pre-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def set_prefix(self, prefix):
        """
        Set the prefix for the node (see Leaf class).
        DEPRECATED; use the prefix property directly.
        """
        warnings.warn("set_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        self.prefix = prefix
    def get_prefix(self):
        """
        Return the prefix for the node (see Leaf class).
        DEPRECATED; use the prefix property directly.
        """
        warnings.warn("get_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        return self.prefix
    def replace(self, new):
        """Replace this node with a new one in the parent."""
        assert self.parent is not None, str(self)
        assert new is not None
        if not isinstance(new, list):
            new = [new]
        l_children = []
        found = False
        # Rebuild the parent's child list, splicing `new` in place of self.
        for ch in self.parent.children:
            if ch is self:
                assert not found, (self.parent.children, self, new)
                if new is not None:
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        for x in new:
            x.parent = self.parent
        self.parent = None
    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        # Walk down to the leftmost leaf, which carries the line number.
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        # Propagate the dirty flag up to the root.
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """
        Remove the node from the tree. Returns the position of the node in its
        parent's children before it was removed.
        """
        if self.parent:
            for i, node in enumerate(self.parent.children):
                if node is self:
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    @property
    def next_sibling(self):
        """
        The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None
    @property
    def prev_sibling(self):
        """
        The node immediately preceding the invocant in their parent's children
        list. If the invocant does not have a previous sibling, it is None.
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]
    def leaves(self):
        # Yield every Leaf in this subtree, left to right.
        for child in self.children:
            for x in child.leaves():
                yield x
    def depth(self):
        # Number of ancestors between self and the root.
        if self.parent is None:
            return 0
        return 1 + self.parent.depth()
    def get_suffix(self):
        """
        Return the string immediately following the invocant node. This is
        effectively equivalent to node.next_sibling.prefix
        """
        next_sib = self.next_sibling
        if next_sib is None:
            return u""
        return next_sib.prefix
    if sys.version_info < (3, 0):
        def __str__(self):
            return unicode(self).encode("ascii")
class Node(Base):
    """Concrete implementation for interior nodes."""
    def __init__(self,type, children,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """
        Initializer.
        Takes a type constant (a symbol number >= 256), a sequence of
        child nodes, and an optional context keyword argument.
        As a side effect, the parent pointers of the children are updated.
        """
        assert type >= 256, type
        self.type = type
        self.children = list(children)
        for ch in self.children:
            assert ch.parent is None, repr(ch)
            ch.parent = self
        if prefix is not None:
            self.prefix = prefix
        if fixers_applied:
            self.fixers_applied = fixers_applied[:]
        else:
            self.fixers_applied = None
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%s, %r)" % (self.__class__.__name__,
                               type_repr(self.type),
                               self.children)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return u"".join(map(unicode, self.children))
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.children) == (other.type, other.children)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Node(self.type, [ch.clone() for ch in self.children],
                    fixers_applied=self.fixers_applied)
    def post_order(self):
        """Return a post-order iterator for the tree."""
        for child in self.children:
            for node in child.post_order():
                yield node
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
        for child in self.children:
            for node in child.pre_order():
                yield node
    def _prefix_getter(self):
        """
        The whitespace and comments preceding this node in the input.
        """
        # An interior node's prefix is that of its leftmost child.
        if not self.children:
            return ""
        return self.children[0].prefix
    def _prefix_setter(self, prefix):
        if self.children:
            self.children[0].prefix = prefix
    prefix = property(_prefix_getter, _prefix_setter)
    def set_child(self, i, child):
        """
        Equivalent to 'node.children[i] = child'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children[i].parent = None
        self.children[i] = child
        self.changed()
    def insert_child(self, i, child):
        """
        Equivalent to 'node.children.insert(i, child)'. This method also sets
        the child's parent attribute appropriately.
        """
        child.parent = self
        self.children.insert(i, child)
        self.changed()
    def append_child(self, child):
        """
        Equivalent to 'node.children.append(child)'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children.append(child)
        self.changed()
class Leaf(Base):
    """Concrete implementation for leaf nodes."""
    # Default values for instance variables
    _prefix = "" # Whitespace and comments preceding this token in the input
    lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token tarts in the input
    def __init__(self, type, value,
                 context=None,
                 prefix=None,
                 fixers_applied=[]):
        """
        Initializer.
        Takes a type constant (a token number < 256), a string value, and an
        optional context keyword argument.
        """
        assert 0 <= type < 256, type
        # context, when given, is (prefix, (lineno, column)).
        if context is not None:
            self._prefix, (self.lineno, self.column) = context
        self.type = type
        self.value = value
        if prefix is not None:
            self._prefix = prefix
        self.fixers_applied = fixers_applied[:]
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.type,
                               self.value)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return self.prefix + unicode(self.value)
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.value) == (other.type, other.value)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Leaf(self.type, self.value,
                    (self.prefix, (self.lineno, self.column)),
                    fixers_applied=self.fixers_applied)
    def leaves(self):
        yield self
    def post_order(self):
        """Return a post-order iterator for the tree."""
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
    def _prefix_getter(self):
        """
        The whitespace and comments preceding this token in the input.
        """
        return self._prefix
    def _prefix_setter(self, prefix):
        self.changed()
        self._prefix = prefix
    prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
    """
    Convert raw node information to a Node or Leaf instance.
    This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is build
    strictly bottom-up.
    """
    type, value, context, children = raw_node
    # Grammar symbols become Nodes; plain tokens become Leaves.
    if children or type in gr.number2symbol:
        # If there's exactly one child, return that child instead of
        # creating a new node.
        if len(children) == 1:
            return children[0]
        return Node(type, children, context=context)
    else:
        return Leaf(type, value, context=context)
class BasePattern(object):
    """
    A pattern is a tree matching pattern.
    It looks for a specific node type (token or symbol), and
    optionally for a specific content.
    This is an abstract base class. There are three concrete
    subclasses:
    - LeafPattern matches a single leaf node;
    - NodePattern matches a single node (usually non-leaf);
    - WildcardPattern matches a sequence of nodes of variable length.
    """
    # Defaults for instance variables
    type = None # Node type (token if < 256, symbol if >= 256)
    content = None # Optional content matching pattern
    name = None # Optional name used to store match in results dict
    def __new__(cls, *args, **kwds):
        """Constructor that prevents BasePattern from being instantiated."""
        assert cls is not BasePattern, "Cannot instantiate BasePattern"
        return object.__new__(cls)
    def __repr__(self):
        # Drop trailing None args for a compact representation.
        args = [type_repr(self.type), self.content, self.name]
        while args and args[-1] is None:
            del args[-1]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
    def optimize(self):
        """
        A subclass can define this as a hook for optimizations.
        Returns either self or another node with the same effect.
        """
        return self
    def match(self, node, results=None):
        """
        Does this pattern exactly match a node?
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        Default implementation for non-wildcard patterns.
        """
        if self.type is not None and node.type != self.type:
            return False
        if self.content is not None:
            r = None
            if results is not None:
                r = {}
            if not self._submatch(node, r):
                return False
            if r:
                results.update(r)
        if results is not None and self.name:
            results[self.name] = node
        return True
    def match_seq(self, nodes, results=None):
        """
        Does this pattern exactly match a sequence of nodes?
        Default implementation for non-wildcard patterns.
        """
        if len(nodes) != 1:
            return False
        return self.match(nodes[0], results)
    def generate_matches(self, nodes):
        """
        Generator yielding all matches for this pattern.
        Default implementation for non-wildcard patterns.
        """
        # Non-wildcard patterns consume exactly one node when they match.
        r = {}
        if nodes and self.match(nodes[0], r):
            yield 1, r
class LeafPattern(BasePattern):
    """Pattern matching a single Leaf node, optionally by token type and value."""
    def __init__(self, type=None, content=None, name=None):
        """
        Initializer. Takes optional type, content, and name.
        The type, if given must be a token type (< 256). If not given,
        this matches any *leaf* node; the content may still be required.
        The content, if given, must be a string.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, basestring), repr(content)
        self.type = type
        self.content = content
        self.name = name
    def match(self, node, results=None):
        """Override match() to insist on a leaf node."""
        if not isinstance(node, Leaf):
            return False
        return BasePattern.match(self, node, results)
    def _submatch(self, node, results=None):
        """
        Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.
        """
        return self.content == node.value
class NodePattern(BasePattern):

    # Flipped to True by __init__ when any content item is a
    # WildcardPattern; that forces the combinatorial submatcher.
    wildcards = False

    def __init__(self, type=None, content=None, name=None):
        """
        Build a pattern that matches interior (non-leaf) nodes.

        ``type``, when supplied, must be a symbol type (>= 256); a None
        type matches any single node (leaf or not), unless ``content``
        is also given, in which case only non-leaf nodes whose children
        match qualify.  ``content``, when supplied, must be a sequence
        of BasePattern instances that the node's children must match
        exactly.  ``name``, when supplied, is the key under which a
        matched node is recorded in the results dict.
        """
        assert type is None or type >= 256, type
        if content is not None:
            assert not isinstance(content, basestring), repr(content)
            items = list(content)
            for index, item in enumerate(items):
                assert isinstance(item, BasePattern), (index, item)
                if isinstance(item, WildcardPattern):
                    self.wildcards = True
            content = items
        self.type = type
        self.content = content
        self.name = name

    def _submatch(self, node, results=None):
        """
        Match self.content against node.children.

        Assumes the node type already matched and self.content is not
        None.  ``results``, if given, collects named submatches; it may
        be partially updated even when the overall match fails.
        """
        if self.wildcards:
            # With wildcards present, accept any combination that
            # consumes every child of the node.
            for consumed, captured in generate_matches(self.content,
                                                       node.children):
                if consumed == len(node.children):
                    if results is not None:
                        results.update(captured)
                    return True
            return False
        if len(self.content) != len(node.children):
            return False
        return all(subpattern.match(child, results)
                   for subpattern, child in zip(self.content, node.children))
class WildcardPattern(BasePattern):
    """
    A wildcard pattern can match zero or more nodes.
    This has all the flexibility needed to implement patterns like:
    .* .+ .? .{m,n}
    (a b c | d e | f)
    (...)* (...)+ (...)? (...){m,n}
    except it always uses non-greedy matching.
    """

    def __init__(self, content=None, min=0, max=HUGE, name=None):
        """
        Initializer.
        Args:
            content: optional sequence of subsequences of patterns;
                     if absent, matches one node;
                     if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
            name: optional name assigned to this match
        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
            equivalent to (a b c | d e | f g h); if content is None,
            this is equivalent to '.' in regular expression terms.
        The min and max parameters work as follows:
            min=0, max=maxint: .*
            min=1, max=maxint: .+
            min=0, max=1: .?
            min=1, max=1: .
        If content is not None, replace the dot with the parenthesized
        list of alternatives, e.g. (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            content = tuple(map(tuple, content))  # Protect against alterations
            # Check sanity of alternatives
            assert len(content), repr(content)  # Can't have zero alternatives
            for alt in content:
                # NOTE: the assert forbids empty alternatives, despite what
                # the historical comment here claimed.
                assert len(alt), repr(alt)  # Can't have empty alternatives
        self.content = content
        self.min = min
        self.max = max
        self.name = name

    def optimize(self):
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        # Detect the single-alternative, single-pattern case, which is the
        # only shape eligible for collapsing.
        if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            # Exactly-once wildcard: equivalent to a plain node pattern
            # or to its sole subpattern.
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and self.name == subpattern.name:
                return subpattern.optimize()
        # Collapse nested optional wildcards (e.g. (a?)? -> a?); the
        # min/max products preserve the combined repetition bounds.
        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
            return WildcardPattern(subpattern.content,
                                   self.min*subpattern.min,
                                   self.max*subpattern.max,
                                   subpattern.name)
        return self

    def match(self, node, results=None):
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)

    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?"""
        # Accept the first generated match that consumes every node.
        for c, r in self.generate_matches(nodes):
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False

    def generate_matches(self, nodes):
        """
        Generator yielding matches for a sequence of nodes.
        Args:
            nodes: sequence of nodes
        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__)
            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored. We don't do this on non-CPython implementation because
            # they don't have this problem.
            if hasattr(sys, "getrefcount"):
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # We fall back to the iterative pattern matching scheme if the recursive
                # scheme hits the recursion limit.
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                # Restore stderr even when a generated match escapes via
                # an exception or the generator is closed early.
                if hasattr(sys, "getrefcount"):
                    sys.stderr = save_stderr

    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            # A zero-length match is allowed when min permits it.
            yield 0, {}
        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))
        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                # Merge the two partial capture dicts into a
                                # fresh dict so earlier matches stay intact.
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results

    def _bare_name_matches(self, nodes):
        """Special optimized matcher for bare_name."""
        count = 0
        r = {}
        done = False
        max = len(nodes)
        # Greedily consume nodes as long as any alternative's first
        # pattern matches the next node.
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return count, r

    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
class NegatedPattern(BasePattern):

    def __init__(self, content=None):
        """
        Initializer.
        The argument is either a pattern or None. If it is None, this
        only matches an empty sequence (effectively '$' in regex
        lingo). If it is not None, this matches whenever the argument
        pattern doesn't have any matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content

    def match(self, node, results=None):
        # A negated pattern never matches a single node in its entirety.
        # FIX: accept the optional ``results`` argument so the override is
        # signature-compatible with BasePattern.match(); generic matching
        # code that passes ``results`` previously raised TypeError here.
        return False

    def match_seq(self, nodes, results=None):
        # We only match an empty sequence of nodes in its entirety.
        # FIX: ``results`` added for signature compatibility with
        # BasePattern.match_seq(); it is never populated since there is
        # nothing to capture.
        return len(nodes) == 0

    def generate_matches(self, nodes):
        """
        Yield (0, {}) exactly once when the negation holds: either the
        node sequence is empty (content is None) or the wrapped pattern
        produces no matches at all.
        """
        if self.content is None:
            # Return a match if there is an empty sequence
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Return a match if the argument pattern has no matches
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}
def generate_matches(patterns, nodes):
    """
    Generator yielding matches for a sequence of patterns and nodes.
    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes
    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    if not patterns:
        # An empty pattern sequence trivially matches zero nodes.
        yield 0, {}
        return
    first, remaining = patterns[0], patterns[1:]
    for head_count, head_results in first.generate_matches(nodes):
        if not remaining:
            yield head_count, head_results
        else:
            # Recurse on the rest of the patterns against the unconsumed
            # nodes, merging the captures of both halves.
            for tail_count, tail_results in generate_matches(
                    remaining, nodes[head_count:]):
                combined = {}
                combined.update(head_results)
                combined.update(tail_results)
                yield head_count + tail_count, combined
| epl-1.0 |
UnrememberMe/pants | src/python/pants/bin/remote_pants_runner.py | 2 | 4560 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import signal
import sys
import time
from contextlib import contextmanager
from pants.console.stty_utils import STTYSettings
from pants.java.nailgun_client import NailgunClient
from pants.java.nailgun_protocol import NailgunProtocol
from pants.pantsd.pants_daemon import PantsDaemon
from pants.util.collections import combined_dict
logger = logging.getLogger(__name__)
class RemotePantsRunner(object):
  """A thin client variant of PantsRunner."""

  class Fallback(Exception):
    """Raised when fallback to an alternate execution mode is requested."""

  class PortNotFound(Exception):
    """Raised when the pailgun port can't be found."""

  # Command name sent over the nailgun protocol to the daemon.
  PANTS_COMMAND = 'pants'
  # Exceptions that signal "daemon unusable" and trigger Fallback in run().
  RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)

  def __init__(self, exiter, args, env, bootstrap_options, stdin=None, stdout=None, stderr=None):
    """
    :param Exiter exiter: The Exiter instance to use for this run.
    :param list args: The arguments (e.g. sys.argv) for this run.
    :param dict env: The environment (e.g. os.environ) for this run.
    :param Options bootstrap_options: The Options bag containing the bootstrap options.
    :param file stdin: The stream representing stdin.
    :param file stdout: The stream representing stdout.
    :param file stderr: The stream representing stderr.
    """
    # Recorded immediately so the daemon can attribute client-side startup
    # latency to this run (see PANTSD_RUNTRACKER_CLIENT_START_TIME below).
    self._start_time = time.time()
    self._exiter = exiter
    self._args = args
    self._env = env
    self._bootstrap_options = bootstrap_options
    self._stdin = stdin or sys.stdin
    self._stdout = stdout or sys.stdout
    self._stderr = stderr or sys.stderr

  @contextmanager
  def _trapped_signals(self, client):
    """A contextmanager that overrides the SIGINT (control-c) and SIGQUIT (control-\) handlers
    and handles them remotely."""
    def handle_control_c(signum, frame):
      # Forward the interrupt over the nailgun connection instead of
      # killing this thin client locally.
      client.send_control_c()
    existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
    # N.B. SIGQUIT will abruptly kill the pantsd-runner, which will shut down the other end
    # of the Pailgun connection - so we send a gentler SIGINT here instead.
    existing_sigquit_handler = signal.signal(signal.SIGQUIT, handle_control_c)
    # Retry interrupted system calls.
    signal.siginterrupt(signal.SIGINT, False)
    signal.siginterrupt(signal.SIGQUIT, False)
    try:
      yield
    finally:
      # Restore whatever handlers were installed before entry.
      signal.signal(signal.SIGINT, existing_sigint_handler)
      signal.signal(signal.SIGQUIT, existing_sigquit_handler)

  def _setup_logging(self):
    """Sets up basic stdio logging for the thin client."""
    log_level = logging.getLevelName(self._bootstrap_options.for_global_scope().level.upper())
    formatter = logging.Formatter('%(levelname)s] %(message)s')
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(log_level)
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(handler)

  def _connect_and_execute(self, port):
    """Run the pants command on the daemon listening on ``port`` and exit
    with its result code.  Never returns normally: it finishes via
    self._exiter.exit()."""
    # Merge the nailgun TTY capability environment variables with the passed environment dict.
    ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
    modified_env = combined_dict(self._env, ng_env)
    modified_env['PANTSD_RUNTRACKER_CLIENT_START_TIME'] = str(self._start_time)
    assert isinstance(port, int), 'port {} is not an integer!'.format(port)
    # Instantiate a NailgunClient.
    client = NailgunClient(port=port,
                           ins=self._stdin,
                           out=self._stdout,
                           err=self._stderr,
                           exit_on_broken_pipe=True)
    with self._trapped_signals(client), STTYSettings.preserved():
      # Execute the command on the pailgun.
      result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
    # Exit.
    self._exiter.exit(result)

  def _maybe_launch_pantsd(self):
    # Launches pantsd if it isn't already running and returns its port.
    return PantsDaemon.Factory.maybe_launch(bootstrap_options=self._bootstrap_options)

  def run(self, args=None):
    """Entry point: connect to (or launch) pantsd and run the command.

    :raises RemotePantsRunner.Fallback: when the daemon is unreachable,
      so the caller can fall back to local execution.
    """
    self._setup_logging()
    port = self._maybe_launch_pantsd()
    logger.debug('connecting to pailgun on port {}'.format(port))
    try:
      self._connect_and_execute(port)
    except self.RECOVERABLE_EXCEPTIONS as e:
      raise self.Fallback(e)
| apache-2.0 |
Didacti/botornado | botornado/s3/key.py | 1 | 40979 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.user import User
from boto import UserAgent
try:
from hashlib import md5
except ImportError:
from md5 import md5
from boto.s3.key import *
class AsyncKey(Key):
def __init__(self, bucket=None, name=None):
    # Delegate all state setup to the synchronous boto Key; AsyncKey only
    # overrides the I/O paths to accept completion callbacks.
    Key.__init__(self, bucket=bucket, name=name)
def __repr__(self):
    """Debugging representation, including the bucket name when known."""
    bucket_name = self.bucket.name if self.bucket else None
    return '<AsyncKey: %s,%s>' % (bucket_name, self.name)
def open_read(self, headers=None, query_args=None,
              override_num_retries=None, response_headers=None, callback=None):
    """
    Open this key for reading
    :type headers: dict
    :param headers: Headers to pass in the web request
    :type query_args: string
    :param query_args: Arguments to pass in the query string (ie, 'torrent')
    :type override_num_retries: int
    :param override_num_retries: If not None will override configured
                                 num_retries parameter for underlying GET.
    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP headers/values
                             that will override any headers associated with
                             the stored object in the response.
                             See http://goo.gl/EWOPb for details.
    :type callback: function
    :param callback: invoked with the HTTP response once the GET has been
                     issued and the key's metadata has been populated.
    """
    # NOTE(review): if the key is already open (self.resp set), nothing
    # happens and ``callback`` is never invoked — confirm callers expect
    # this.
    if self.resp == None:
        self.mode = 'r'
        provider = self.bucket.connection.provider
        def opened_read(response):
            # Completion handler: record the response, validate status,
            # and mirror interesting response headers onto the key.
            self.resp = response
            if self.resp.status < 199 or self.resp.status > 299:
                body = self.resp.read()
                raise provider.storage_response_error(self.resp.status,
                                                      self.resp.reason, body)
            response_headers = self.resp.msg
            self.metadata = boto.utils.get_aws_metadata(response_headers,
                                                        provider)
            for name,value in response_headers.items():
                # To get correct size for Range GETs, use Content-Range
                # header if one was returned. If not, use Content-Length
                # header.
                if (name.lower() == 'content-length' and
                    'Content-Range' not in response_headers):
                    self.size = int(value)
                elif name.lower() == 'content-range':
                    end_range = re.sub('.*/(.*)', '\\1', value)
                    self.size = int(end_range)
                elif name.lower() == 'etag':
                    self.etag = value
                elif name.lower() == 'content-type':
                    self.content_type = value
                elif name.lower() == 'content-encoding':
                    self.content_encoding = value
                elif name.lower() == 'last-modified':
                    self.last_modified = value
                elif name.lower() == 'cache-control':
                    self.cache_control = value
            self.handle_version_headers(self.resp)
            self.handle_encryption_headers(self.resp)
            if callable(callback):
                callback(response)
        self.bucket.connection.make_request(
            'GET', self.bucket.name, self.name, headers,
            query_args=query_args,
            override_num_retries=override_num_retries, callback=opened_read)
def open(self, mode='r', headers=None, query_args=None,
         override_num_retries=None, callback=None):
    """
    Open the key for reading ('r') or writing ('w') and dispatch to the
    matching async opener; any other mode raises BotoClientError.
    """
    if mode not in ('r', 'w'):
        raise BotoClientError('Invalid mode: %s' % mode)
    self.mode = mode
    if mode == 'r':
        self.open_read(headers=headers, query_args=query_args,
                       override_num_retries=override_num_retries,
                       callback=callback)
    else:
        self.open_write(headers=headers,
                        override_num_retries=override_num_retries,
                        callback=callback)
def next(self):
    """
    By providing a next method, the key object supports use as an iterator.
    For example, you can now say:
    for bytes in key:
        write bytes to a file or whatever
    All of the HTTP connection stuff is handled for you.
    """
    # Synchronous iteration cannot be supported by the async client, so
    # this deliberately raises instead of blocking on the connection.
    raise BotoClientError('Not Implemented')
def read(self, size=0, callback=None):
    """
    Asynchronously read up to ``size`` bytes of the key's body (the whole
    body when size == 0) and pass the data to ``callback``.  When the
    read yields no data the underlying response is closed.
    """
    def deliver(response):
        chunk = self.resp.read() if size == 0 else self.resp.read(size)
        if not chunk:
            # End of stream: release the response/connection.
            self.close()
        if callable(callback):
            callback(chunk)
    self.open_read(callback=deliver)
def exists(self, callback=None):
    """
    Asynchronously check whether this key exists in its bucket.

    ``callback`` receives True when the bucket lookup finds the key,
    False otherwise.
    """
    def on_lookup(found):
        if callable(callback):
            callback(bool(found))
    self.bucket.lookup(self.name, callback=on_lookup)
def delete(self, callback=None):
    """
    Delete this key from S3
    """
    # Delegate to the bucket so versioned deletes share one code path;
    # ``callback`` receives the bucket's deletion result.
    return self.bucket.delete_key(self.name, version_id=self.version_id, callback=callback)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
              query_args=None, chunked_transfer=False, callback=None):
    """
    Upload a file to a key into a bucket on S3.
    :type fp: file
    :param fp: The file pointer to upload
    :type headers: dict
    :param headers: The headers to pass along with the PUT request
    :type cb: function
    :param cb: a callback function that will be called to report
               progress on the upload. The callback should accept
               two integer parameters, the first representing the
               number of bytes that have been successfully
               transmitted to S3 and the second representing the
               size of the to be transmitted object.
    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
                   parameter this parameter determines the granularity
                   of the callback by defining the maximum number of
                   times the callback will be called during the file
                   transfer. Providing a negative integer will cause
                   your callback to be called with each buffer read.
    :type callback: function
    :param callback: invoked with the PUT response once the upload has
                     completed and version headers have been processed.
    """
    provider = self.bucket.connection.provider
    def sender(http_conn, method, path, data, headers, sendback=None):
        # Streams the request body over an already-negotiated HTTP
        # connection; invoked by the connection's make_request machinery.
        http_conn.putrequest(method, path)
        for key in headers:
            http_conn.putheader(key, headers[key])
        http_conn.endheaders()
        if chunked_transfer:
            # MD5 for the stream has to be calculated on the fly, as
            # we don't know the size of the stream before hand.
            m = md5()
        else:
            fp.seek(0)
        save_debug = self.bucket.connection.debug
        self.bucket.connection.debug = 0
        # If the debuglevel < 3 we don't want to show connection
        # payload, so turn off HTTP connection-level debug output (to
        # be restored below).
        # Use the getattr approach to allow this to work in AppEngine.
        if getattr(http_conn, 'debuglevel', 0) < 3:
            http_conn.set_debuglevel(0)
        if cb:
            if chunked_transfer:
                # For chunked Transfer, we call the cb for every 1MB
                # of data transferred.
                cb_count = (1024 * 1024)/self.BufferSize
                self.size = 0
            elif num_cb > 2:
                cb_count = self.size / self.BufferSize / (num_cb-2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = total_bytes = 0
            cb(total_bytes, self.size)
        l = fp.read(self.BufferSize)
        while len(l) > 0:
            if chunked_transfer:
                # Emit each buffer as an HTTP/1.1 chunk: hex length,
                # payload, CRLF.
                http_conn.send('%x;\r\n' % len(l))
                http_conn.send(l)
                http_conn.send('\r\n')
            else:
                http_conn.send(l)
            if cb:
                total_bytes += len(l)
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(total_bytes, self.size)
                    i = 0
            if chunked_transfer:
                m.update(l)
            l = fp.read(self.BufferSize)
        if chunked_transfer:
            # Terminating zero-length chunk.
            http_conn.send('0\r\n')
            http_conn.send('\r\n')
            if cb:
                self.size = total_bytes
            # Get the md5 which is calculated on the fly.
            self.md5 = m.hexdigest()
        else:
            fp.seek(0)
        if cb:
            cb(total_bytes, self.size)
        def sender_sent(response):
            # Completion handler for the response to the streamed PUT.
            body = response.read()
            http_conn.set_debuglevel(save_debug)
            self.bucket.connection.debug = save_debug
            if ((response.status == 500 or response.status == 503 or
                 response.getheader('location')) and not chunked_transfer):
                # we'll try again.
                if callable(sendback):
                    sendback(response)
            elif response.status >= 200 and response.status <= 299:
                self.etag = response.getheader('etag')
                if self.etag != '"%s"' % self.md5:
                    raise provider.storage_data_error(
                        'ETag from S3 did not match computed MD5')
                if callable(sendback):
                    sendback(response)
            else:
                raise provider.storage_response_error(
                    response.status, response.reason, body)
        http_conn.getresponse(callback=sender_sent)
    if not headers:
        headers = {}
    else:
        # Copy so the caller's dict is not mutated below.
        headers = headers.copy()
    headers['User-Agent'] = UserAgent
    if self.base64md5:
        headers['Content-MD5'] = self.base64md5
    if self.storage_class != 'STANDARD':
        headers[provider.storage_class_header] = self.storage_class
    if headers.has_key('Content-Encoding'):
        self.content_encoding = headers['Content-Encoding']
    if headers.has_key('Content-Type'):
        self.content_type = headers['Content-Type']
    elif self.path:
        # Guess the content type from the local file name.
        self.content_type = mimetypes.guess_type(self.path)[0]
        if self.content_type == None:
            self.content_type = self.DefaultContentType
        headers['Content-Type'] = self.content_type
    else:
        headers['Content-Type'] = self.content_type
    if not chunked_transfer:
        headers['Content-Length'] = str(self.size)
    # headers['Expect'] = '100-Continue'
    headers = boto.utils.merge_meta(headers, self.metadata, provider)
    def file_sent(resp):
        self.handle_version_headers(resp, force=True)
        if callable(callback):
            callback(resp)
    self.bucket.connection.make_request('PUT', self.bucket.name,
                                        self.name, headers,
                                        sender=sender,
                                        query_args=query_args, callback=file_sent)
def set_contents_from_stream(self, fp, headers=None, replace=True,
                             cb=None, num_cb=10, policy=None,
                             reduced_redundancy=False, query_args=None, callback=None):
    """
    Store an object using the name of the Key object as the key in
    cloud and the contents of the data stream pointed to by 'fp' as
    the contents.
    The stream object is not seekable and total size is not known.
    This has the implication that we can't specify the Content-Size and
    Content-MD5 in the header. So for huge uploads, the delay in calculating
    MD5 is avoided but with a penalty of inability to verify the integrity
    of the uploaded data.
    :type fp: file
    :param fp: the file whose contents are to be uploaded
    :type headers: dict
    :param headers: additional HTTP headers to be sent with the PUT request.
    :type replace: bool
    :param replace: If this parameter is False, the method will first check
        to see if an object exists in the bucket with the same key. If it
        does, it won't overwrite it. The default value is True which will
        overwrite the object.
    :type cb: function
    :param cb: a callback function that will be called to report
        progress on the upload. The callback should accept two integer
        parameters, the first representing the number of bytes that have
        been successfully transmitted to GS and the second representing the
        total number of bytes that need to be transmitted.
    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter, this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be called
        during the file transfer.
    :type policy: :class:`boto.gs.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the new key
        in GS.
    :type reduced_redundancy: bool
    :param reduced_redundancy: If True, this will set the storage
                               class of the new Key to be
                               REDUCED_REDUNDANCY. The Reduced Redundancy
                               Storage (RRS) feature of S3, provides lower
                               redundancy at lower storage cost.
    :type callback: function
    :param callback: invoked with the upload response on success, or with
                     False when ``replace`` is False and the key already
                     exists.
    """
    provider = self.bucket.connection.provider
    if not provider.supports_chunked_transfer():
        raise BotoClientError('%s does not support chunked transfer'
                              % provider.get_provider_name())
    # Name of the Object should be specified explicitly for Streams.
    if not self.name or self.name == '':
        raise BotoClientError('Cannot determine the destination '
                              'object name for the given stream')
    if headers is None:
        headers = {}
    if policy:
        headers[provider.acl_header] = policy
    # Set the Transfer Encoding for Streams.
    headers['Transfer-Encoding'] = 'chunked'
    if reduced_redundancy:
        self.storage_class = 'REDUCED_REDUNDANCY'
        if provider.storage_class_header:
            headers[provider.storage_class_header] = self.storage_class
    # NOTE(review): when self.bucket is None nothing is uploaded and
    # ``callback`` never fires — confirm callers handle this.
    if self.bucket != None:
        if not replace:
            def existence_tested(k):
                # Only upload when the lookup shows the key is absent.
                if k:
                    if callable(callback):
                        callback(False)
                else:
                    self.send_file(fp, headers, cb, num_cb, query_args,
                                   chunked_transfer=True, callback=callback)
            self.bucket.lookup(self.name, callback=existence_tested)
            return
        self.send_file(fp, headers, cb, num_cb, query_args,
                       chunked_transfer=True, callback=callback)
def set_contents_from_file(self, fp, headers=None, replace=True,
                           cb=None, num_cb=10, policy=None, md5=None,
                           reduced_redundancy=False, query_args=None,
                           encrypt_key=False, callback=None):
    """
    Store an object in S3 using the name of the Key object as the
    key in S3 and the contents of the file pointed to by 'fp' as the
    contents.
    :type fp: file
    :param fp: the file whose contents to upload
    :type headers: dict
    :param headers: Additional HTTP headers that will be sent with
                    the PUT request.
    :type replace: bool
    :param replace: If this parameter is False, the method
                    will first check to see if an object exists in the
                    bucket with the same key. If it does, it won't
                    overwrite it. The default value is True which will
                    overwrite the object.
    :type cb: function
    :param cb: a callback function that will be called to report
               progress on the upload. The callback should accept
               two integer parameters, the first representing the
               number of bytes that have been successfully
               transmitted to S3 and the second representing the
               size of the to be transmitted object.
    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
                   parameter this parameter determines the granularity
                   of the callback by defining the maximum number of
                   times the callback will be called during the
                   file transfer.
    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the
                   new key in S3.
    :type md5: A tuple containing the hexdigest version of the MD5
               checksum of the file as the first element and the
               Base64-encoded version of the plain checksum as the
               second element. This is the same format returned by
               the compute_md5 method.
    :param md5: If you need to compute the MD5 for any reason prior
                to upload, it's silly to have to do it twice so this
                param, if present, will be used as the MD5 values of
                the file. Otherwise, the checksum will be computed.
    :type reduced_redundancy: bool
    :param reduced_redundancy: If True, this will set the storage
                               class of the new Key to be
                               REDUCED_REDUNDANCY. The Reduced Redundancy
                               Storage (RRS) feature of S3, provides lower
                               redundancy at lower storage cost.
    :type encrypt_key: bool
    :param encrypt_key: If True, the new copy of the object will
                        be encrypted on the server-side by S3 and
                        will be stored in an encrypted form while
                        at rest in S3.
    :type callback: function
    :param callback: invoked with the upload response on success, or with
                     False when ``replace`` is False and the key exists.
    """
    provider = self.bucket.connection.provider
    if headers is None:
        headers = {}
    if policy:
        headers[provider.acl_header] = policy
    if encrypt_key:
        headers[provider.server_side_encryption_header] = 'AES256'
    if reduced_redundancy:
        self.storage_class = 'REDUCED_REDUNDANCY'
        if provider.storage_class_header:
            headers[provider.storage_class_header] = self.storage_class
            # TODO - What if provider doesn't support reduced reduncancy?
            # What if different providers provide different classes?
    if hasattr(fp, 'name'):
        self.path = fp.name
    if self.bucket != None:
        if not md5:
            md5 = self.compute_md5(fp)
        else:
            # even if md5 is provided, still need to set size of content
            fp.seek(0, 2)
            self.size = fp.tell()
            fp.seek(0)
        self.md5 = md5[0]
        self.base64md5 = md5[1]
        if self.name == None:
            # Fall back to a content-addressed key name.
            self.name = self.md5
        if not replace:
            def existence_tested(k):
                # Only upload when the lookup shows the key is absent.
                if k:
                    if callable(callback):
                        callback(False)
                else:
                    self.send_file(fp, headers, cb, num_cb, query_args, callback=callback)
            self.bucket.lookup(self.name, callback=existence_tested)
            return
        self.send_file(fp, headers, cb, num_cb, query_args, callback=callback)
def set_contents_from_filename(self, filename, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False,
                               encrypt_key=False, callback=None):
    """
    Store an object in S3 using the name of the Key object as the
    key in S3 and the contents of the file named by 'filename'.
    See set_contents_from_file method for details about the
    parameters.
    :type filename: string
    :param filename: The name of the file that you want to put onto S3
    :type headers: dict
    :param headers: Additional headers to pass along with the
                    request to AWS.
    :type replace: bool
    :param replace: If True, replaces the contents of the file
                    if it already exists.
    :type cb: function
    :param cb: a callback function that will be called to report
               progress on the upload. The callback should accept
               two integer parameters, the first representing the
               number of bytes that have been successfully
               transmitted to S3 and the second representing the
               size of the to be transmitted object.
    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with
                   the cb parameter this parameter determines the
                   granularity of the callback by defining
                   the maximum number of times the callback will
                   be called during the file transfer.
    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the
                   new key in S3.
    :type md5: A tuple containing the hexdigest version of the MD5
               checksum of the file as the first element and the
               Base64-encoded version of the plain checksum as the
               second element. This is the same format returned by
               the compute_md5 method.
    :param md5: If you need to compute the MD5 for any reason prior
                to upload, it's silly to have to do it twice so this
                param, if present, will be used as the MD5 values
                of the file. Otherwise, the checksum will be computed.
    :type reduced_redundancy: bool
    :param reduced_redundancy: If True, this will set the storage
                               class of the new Key to be
                               REDUCED_REDUNDANCY. The Reduced Redundancy
                               Storage (RRS) feature of S3, provides lower
                               redundancy at lower storage cost.
    :type encrypt_key: bool
    :param encrypt_key: If True, the new copy of the object will
                        be encrypted on the server-side by S3 and
                        will be stored in an encrypted form while
                        at rest in S3.
    :type callback: function
    :param callback: (optional) invoked with the upload response once
                     the upload completes (or with False when replace
                     is False and the key already exists).
    """
    # BUG FIX: the original signature omitted ``callback`` even though the
    # completion handler below references it, so every finished upload
    # raised NameError. It is added as a trailing keyword argument
    # (default None) to match the other set_contents_from_* methods.
    fp = open(filename, 'rb')
    def _set_contents_from_filename(response):
        # Always close the file we opened before notifying the caller.
        fp.close()
        if callable(callback):
            callback(response)
    self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                policy, md5, reduced_redundancy,
                                encrypt_key=encrypt_key,
                                callback=_set_contents_from_filename)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False, callback=None):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
def _set_contents_from_string(response):
fp.close()
if callable(callback):
callback(response)
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key, callback=_set_contents_from_string)
    def get_file(self, fp, headers=None, cb=None, num_cb=10,
                 torrent=False, version_id=None, override_num_retries=None,
                 response_headers=None, callback=None):
        """
        Retrieves a file from an S3 Key

        :type fp: file
        :param fp: File pointer to put the data into

        :type headers: dict
        :param headers: Headers to send when retrieving the file

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept
            two integer parameters, the first representing the
            number of bytes that have been successfully
            transmitted to S3 and the second representing the
            size of the to be transmitted object.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with
            the cb parameter this parameter determines the
            granularity of the callback by defining
            the maximum number of times the callback will
            be called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
            that will override any headers associated with
            the stored object in the response.
            See http://goo.gl/EWOPb for details.
        """
        if cb:
            if num_cb > 2:
                cb_count = self.size / self.BufferSize / (num_cb-2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            # NOTE(review): cb_count is computed but never used below, and
            # total_bytes is never advanced past 0 -- the body is read in a
            # single shot, so the callback only fires with 0 bytes before and
            # after the transfer. Looks like a simplification of upstream
            # boto's chunked read loop; confirm this is intentional.
            i = total_bytes = 0
            cb(total_bytes, self.size)
        # Temporarily silence HTTP debug output for the duration of the GET;
        # restored inside file_got below.
        save_debug = self.bucket.connection.debug
        if self.bucket.connection.debug == 1:
            self.bucket.connection.debug = 0
        query_args = []
        if torrent:
            query_args.append('torrent')
        # If a version_id is passed in, use that. If not, check to see
        # if the Key object has an explicit version_id and, if so, use that.
        # Otherwise, don't pass a version_id query param.
        if version_id is None:
            version_id = self.version_id
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            for key in response_headers:
                query_args.append('%s=%s' % (key, response_headers[key]))
        query_args = '&'.join(query_args)
        def file_got(response):
            # Invoked once the GET has been issued; reads the whole body from
            # the key's open response object. NOTE(review): reads self.resp
            # rather than the response argument -- presumably self.open()
            # stores the HTTP response on self.resp; verify against Key.open.
            body = self.resp.read()
            fp.write(body)
            if cb:
                cb(total_bytes, self.size)
            self.close()
            self.bucket.connection.debug = save_debug
            if callable(callback):
                callback(response)
        self.open('r', headers, query_args=query_args,
                  override_num_retries=override_num_retries, callback=file_got)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if self.bucket != None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id, callback=callback)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers, callback=callback)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
fp = open(filename, 'wb')
def got_contents_to_filename(response):
fp.close()
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified != None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception: pass
if callable(callback):
callback(response)
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers, callback=got_contents_to_filename)
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, callback=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
:rtype: string
:returns: The contents of the file as a string
"""
fp = StringIO.StringIO()
def got_contents_as_string(response):
if callable(callback):
callback(fp.getvalue())
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers, callback=got_contents_as_string)
    def make_public(self, headers=None, callback=None):
        """Apply the canned 'public-read' ACL to this key in its bucket."""
        self.bucket.set_canned_acl('public-read', self.name, headers, callback=callback)
# vim:set ft=python sw=4 :
| mit |
bregman-arie/ansible | lib/ansible/modules/cloud/cloudstack/cs_service_offering.py | 24 | 17072 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_service_offering
description:
- Create and delete service offerings for guest and system VMs.
- Update display_text of existing service offering.
short_description: Manages service offerings on Apache CloudStack based clouds.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
bytes_read_rate:
description:
- Bytes read rate of the disk offering.
bytes_write_rate:
description:
- Bytes write rate of the disk offering.
cpu_number:
description:
- The number of CPUs of the service offering.
cpu_speed:
description:
- The CPU speed of the service offering in MHz.
limit_cpu_usage:
description:
- Restrict the CPU usage to committed service offering.
choices: [ yes, no ]
deployment_planner:
description:
- The deployment planner heuristics used to deploy a VM of this offering.
- If not set, the value of global config C(vm.deployment.planner) is used.
display_text:
description:
- Display text of the service offering.
- If not set, C(name) will be used as C(display_text) while creating.
domain:
description:
- Domain the service offering is related to.
- Public for all domains and subdomains if not set.
host_tags:
description:
      - The host tags for this service offering.
aliases:
- host_tag
hypervisor_snapshot_reserve:
description:
- Hypervisor snapshot reserve space as a percent of a volume.
- Only for managed storage using Xen or VMware.
disk_iops_customized:
description:
- Whether compute offering iops is custom or not.
default: false
disk_iops_read_rate:
description:
- IO requests read rate of the disk offering.
disk_iops_write_rate:
description:
- IO requests write rate of the disk offering.
disk_iops_max:
description:
- Max. iops of the compute offering.
disk_iops_min:
description:
- Min. iops of the compute offering.
is_system:
description:
- Whether it is a system VM offering or not.
choices: [ yes, no ]
default: no
is_volatile:
description:
- Whether the virtual machine needs to be volatile or not.
- Every reboot of VM the root disk is detached then destroyed and a fresh root disk is created and attached to VM.
choices: [ yes, no ]
default: no
memory:
description:
- The total memory of the service offering in MB.
name:
description:
- Name of the service offering.
required: true
network_rate:
description:
- Data transfer rate in Mb/s allowed.
- Supported only for non-system offering and system offerings having C(system_vm_type=domainrouter).
offer_ha:
description:
- Whether HA is set for the service offering.
choices: [ yes, no ]
default: no
provisioning_type:
description:
- Provisioning type used to create volumes.
choices:
- thin
- sparse
- fat
service_offering_details:
description:
- Details for planner, used to store specific parameters.
state:
description:
- State of the service offering.
choices:
- present
- absent
default: present
storage_type:
description:
- The storage type of the service offering.
choices:
- local
- shared
system_vm_type:
description:
- The system VM type.
- Required if C(is_system=true).
choices:
- domainrouter
- consoleproxy
- secondarystoragevm
storage_tags:
description:
- The storage tags for this service offering.
aliases:
- storage_tag
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Create a non-volatile compute service offering with local storage
local_action:
module: cs_service_offering
name: Micro
display_text: Micro 512mb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 512
host_tags: eco
storage_type: local
- name: Create a volatile compute service offering with shared storage
local_action:
module: cs_service_offering
name: Tiny
display_text: Tiny 1gb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 1024
storage_type: shared
is_volatile: true
host_tags: eco
storage_tags: eco
- name: Create or update a volatile compute service offering with shared storage
local_action:
module: cs_service_offering
name: Tiny
display_text: Tiny 1gb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 1024
storage_type: shared
is_volatile: yes
host_tags: eco
storage_tags: eco
- name: Remove a compute service offering
local_action:
module: cs_service_offering
name: Tiny
state: absent
- name: Create or update a system offering for the console proxy
local_action:
module: cs_service_offering
name: System Offering for Console Proxy 2GB
display_text: System Offering for Console Proxy 2GB RAM
is_system: yes
system_vm_type: consoleproxy
cpu_number: 1
cpu_speed: 2198
memory: 2048
storage_type: shared
storage_tags: perf
- name: Remove a system offering
local_action:
module: cs_service_offering
name: System Offering for Console Proxy 2GB
is_system: yes
state: absent
'''
RETURN = '''
---
id:
description: UUID of the service offering
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
cpu_number:
description: Number of CPUs in the service offering
returned: success
type: int
sample: 4
cpu_speed:
description: Speed of CPUs in MHz in the service offering
returned: success
type: int
sample: 2198
disk_iops_max:
description: Max iops of the disk offering
returned: success
type: int
sample: 1000
disk_iops_min:
description: Min iops of the disk offering
returned: success
type: int
sample: 500
disk_bytes_read_rate:
description: Bytes read rate of the service offering
returned: success
type: int
sample: 1000
disk_bytes_write_rate:
description: Bytes write rate of the service offering
returned: success
type: int
sample: 1000
disk_iops_read_rate:
description: IO requests per second read rate of the service offering
returned: success
type: int
sample: 1000
disk_iops_write_rate:
description: IO requests per second write rate of the service offering
returned: success
type: int
sample: 1000
created:
description: Date the offering was created
returned: success
type: string
sample: 2017-11-19T10:48:59+0000
display_text:
description: Display text of the offering
returned: success
type: string
sample: Micro 512mb 1cpu
domain:
description: Domain the offering is into
returned: success
type: string
sample: ROOT
host_tags:
description: List of host tags
returned: success
type: list
sample: [ 'eco' ]
storage_tags:
description: List of storage tags
returned: success
type: list
sample: [ 'eco' ]
is_system:
description: Whether the offering is for system VMs or not
returned: success
type: bool
sample: false
is_iops_customized:
description: Whether the offering uses custom IOPS or not
returned: success
type: bool
sample: false
is_volatile:
description: Whether the offering is volatile or not
returned: success
type: bool
sample: false
limit_cpu_usage:
description: Whether the CPU usage is restricted to committed service offering
returned: success
type: bool
sample: false
memory:
description: Memory of the system offering
returned: success
type: int
sample: 512
name:
description: Name of the system offering
returned: success
type: string
sample: Micro
offer_ha:
description: Whether HA support is enabled in the offering or not
returned: success
type: bool
sample: false
provisioning_type:
description: Provisioning type used to create volumes
returned: success
type: string
sample: thin
storage_type:
description: Storage type used to create volumes
returned: success
type: string
sample: shared
system_vm_type:
description: System VM type of this offering
returned: success
type: string
sample: consoleproxy
service_offering_details:
  description: Additional service offering details
returned: success
type: dict
sample: "{'vgpuType': 'GRID K180Q','pciDevice':'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}"
network_rate:
description: Data transfer rate in megabits per second allowed
returned: success
type: int
sample: 1000
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackServiceOffering(AnsibleCloudStack):
    """Manage CloudStack service offerings: create, update and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackServiceOffering, self).__init__(module)
        # Maps CloudStack API result keys to the snake_case keys documented
        # in RETURN.
        # NOTE(review): the mixed-case disk* keys are kept verbatim from the
        # original; confirm they match the casing listServiceOfferings
        # actually returns.
        self.returns = {
            'cpunumber': 'cpu_number',
            'cpuspeed': 'cpu_speed',
            'deploymentplanner': 'deployment_planner',
            'diskBytesReadRate': 'disk_bytes_read_rate',
            'diskBytesWriteRate': 'disk_bytes_write_rate',
            'diskIopsReadRate': 'disk_iops_read_rate',
            'diskIopsWriteRate': 'disk_iops_write_rate',
            'maxiops': 'disk_iops_max',
            'miniops': 'disk_iops_min',
            'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve',
            'iscustomized': 'is_customized',
            'iscustomizediops': 'is_iops_customized',
            'issystem': 'is_system',
            'isvolatile': 'is_volatile',
            'limitcpuuse': 'limit_cpu_usage',
            'memory': 'memory',
            'networkrate': 'network_rate',
            'offerha': 'offer_ha',
            'provisioningtype': 'provisioning_type',
            'serviceofferingdetails': 'service_offering_details',
            'storagetype': 'storage_type',
            'systemvmtype': 'system_vm_type',
            'tags': 'storage_tags',
        }

    def get_service_offering(self):
        """Return the first offering matching name/domain/system filters, or None."""
        args = {
            'name': self.module.params.get('name'),
            'domainid': self.get_domain(key='id'),
            'issystem': self.module.params.get('is_system'),
            'systemvmtype': self.module.params.get('system_vm_type'),
        }
        service_offerings = self.query_api('listServiceOfferings', **args)
        if service_offerings:
            return service_offerings['serviceoffering'][0]

    def present_service_offering(self):
        """Ensure the offering exists, creating or updating it as needed."""
        service_offering = self.get_service_offering()
        if not service_offering:
            service_offering = self._create_offering(service_offering)
        else:
            service_offering = self._update_offering(service_offering)
        return service_offering

    def absent_service_offering(self):
        """Delete the offering if present; honors check mode."""
        service_offering = self.get_service_offering()
        if service_offering:
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {
                    'id': service_offering['id'],
                }
                self.query_api('deleteServiceOffering', **args)
        return service_offering

    def _create_offering(self, service_offering):
        """Create a new offering from the module parameters (check-mode aware)."""
        self.result['changed'] = True

        system_vm_type = self.module.params.get('system_vm_type')
        is_system = self.module.params.get('is_system')

        # A system VM offering must declare which system VM type it targets.
        required_params = []
        if is_system and not system_vm_type:
            required_params.append('system_vm_type')
        self.module.fail_on_missing_params(required_params=required_params)

        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'bytesreadrate': self.module.params.get('disk_bytes_read_rate'),
            'byteswriterate': self.module.params.get('disk_bytes_write_rate'),
            'cpunumber': self.module.params.get('cpu_number'),
            'cpuspeed': self.module.params.get('cpu_speed'),
            'customizediops': self.module.params.get('is_iops_customized'),
            'deploymentplanner': self.module.params.get('deployment_planner'),
            'domainid': self.get_domain(key='id'),
            'hosttags': self.module.params.get('host_tags'),
            'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'),
            'iopsreadrate': self.module.params.get('disk_iops_read_rate'),
            'iopswriterate': self.module.params.get('disk_iops_write_rate'),
            'maxiops': self.module.params.get('disk_iops_max'),
            'miniops': self.module.params.get('disk_iops_min'),
            'issystem': is_system,
            'isvolatile': self.module.params.get('is_volatile'),
            'memory': self.module.params.get('memory'),
            'networkrate': self.module.params.get('network_rate'),
            'offerha': self.module.params.get('offer_ha'),
            'provisioningtype': self.module.params.get('provisioning_type'),
            'serviceofferingdetails': self.module.params.get('service_offering_details'),
            'storagetype': self.module.params.get('storage_type'),
            'systemvmtype': system_vm_type,
            'tags': self.module.params.get('storage_tags'),
            'limitcpuuse': self.module.params.get('limit_cpu_usage')
        }
        if not self.module.check_mode:
            res = self.query_api('createServiceOffering', **args)
            service_offering = res['serviceoffering']
        return service_offering

    def _update_offering(self, service_offering):
        """Update name/display_text of an existing offering when they differ."""
        args = {
            'id': service_offering['id'],
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
        }
        if self.has_changed(args, service_offering):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateServiceOffering', **args)
                service_offering = res['serviceoffering']
        return service_offering

    def get_result(self, service_offering):
        """Post-process the API result into the documented return structure."""
        super(AnsibleCloudStackServiceOffering, self).get_result(service_offering)
        if service_offering:
            if 'hosttags' in service_offering:
                # str.split always returns a non-empty list, so the original
                # `... or [value]` fallback was dead code and has been removed.
                self.result['host_tags'] = service_offering['hosttags'].split(',')

            # Prevent confusion, the api returns a tags key for storage tags.
            if 'tags' in service_offering:
                self.result['storage_tags'] = service_offering['tags'].split(',')
            if 'tags' in self.result:
                del self.result['tags']
        return self.result
def main():
    """Module entry point: build the argument spec and apply the desired state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        cpu_number=dict(type='int'),
        cpu_speed=dict(type='int'),
        limit_cpu_usage=dict(type='bool'),
        deployment_planner=dict(),
        domain=dict(),
        host_tags=dict(type='list', aliases=['host_tag']),
        hypervisor_snapshot_reserve=dict(type='int'),
        disk_bytes_read_rate=dict(type='int'),
        disk_bytes_write_rate=dict(type='int'),
        disk_iops_customized=dict(type='bool'),
        disk_iops_read_rate=dict(type='int'),
        disk_iops_write_rate=dict(type='int'),
        disk_iops_max=dict(type='int'),
        disk_iops_min=dict(type='int'),
        is_system=dict(type='bool', default=False),
        is_volatile=dict(type='bool'),
        is_iops_customized=dict(type='bool'),
        memory=dict(type='int'),
        network_rate=dict(type='int'),
        offer_ha=dict(type='bool'),
        provisioning_type=dict(choices=['thin', 'sparse', 'fat']),
        # NOTE(review): DOCUMENTATION describes this as a dict of planner
        # details, but the spec declares type='bool'; left unchanged to avoid
        # altering accepted inputs -- confirm against the CloudStack API.
        service_offering_details=dict(type='bool'),
        # Bug fix: these two used the misspelled key 'choice', which
        # AnsibleModule silently ignores, so the documented value validation
        # never happened.
        storage_type=dict(choices=['local', 'shared']),
        system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']),
        storage_tags=dict(type='list', aliases=['storage_tag']),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_so = AnsibleCloudStackServiceOffering(module)

    state = module.params.get('state')
    if state == "absent":
        service_offering = acs_so.absent_service_offering()
    else:
        service_offering = acs_so.present_service_offering()

    result = acs_so.get_result(service_offering)
    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
abgoswam/data-science-from-scratch | code-python3/simple_linear_regression.py | 4 | 3953 | from collections import Counter, defaultdict
from linear_algebra import vector_subtract
from statistics import mean, correlation, standard_deviation, de_mean
from gradient_descent import minimize_stochastic
import math, random
def predict(alpha, beta, x_i):
    """Predicted y for input x_i under the line y = alpha + beta * x."""
    return alpha + beta * x_i

def error(alpha, beta, x_i, y_i):
    """Residual at one point: actual y_i minus the model's prediction."""
    prediction = predict(alpha, beta, x_i)
    return y_i - prediction

def sum_of_squared_errors(alpha, beta, x, y):
    """Total squared residual over the paired samples in x and y."""
    total = 0
    for x_i, y_i in zip(x, y):
        total += error(alpha, beta, x_i, y_i) ** 2
    return total
def least_squares_fit(x, y):
    """given training values for x and y,
    find the least-squares values of alpha and beta"""
    # slope from the closed-form solution, then the intercept through the means
    slope = correlation(x, y) * standard_deviation(y) / standard_deviation(x)
    intercept = mean(y) - slope * mean(x)
    return intercept, slope
def total_sum_of_squares(y):
    """the total squared variation of y_i's from their mean"""
    deviations = de_mean(y)
    return sum(d * d for d in deviations)
def r_squared(alpha, beta, x, y):
    """the fraction of variation in y captured by the model, which equals
    1 - the fraction of variation in y not captured by the model"""
    unexplained = sum_of_squared_errors(alpha, beta, x, y)
    total = total_sum_of_squares(y)
    return 1.0 - unexplained / total
def squared_error(x_i, y_i, theta):
    """Squared residual at a single point; theta is the pair (alpha, beta)."""
    alpha, beta = theta
    residual = error(alpha, beta, x_i, y_i)
    return residual * residual

def squared_error_gradient(x_i, y_i, theta):
    """Gradient of squared_error with respect to (alpha, beta) at one point."""
    alpha, beta = theta
    return [-2 * error(alpha, beta, x_i, y_i),        # alpha partial derivative
            -2 * error(alpha, beta, x_i, y_i) * x_i]  # beta partial derivative
if __name__ == "__main__":
    # Demo dataset from "Data Science from Scratch": friend counts and the
    # corresponding minutes per day spent on the site (outliers removed).
    num_friends_good = [49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    daily_minutes_good = [68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]

    # Closed-form least-squares fit and its goodness of fit.
    alpha, beta = least_squares_fit(num_friends_good, daily_minutes_good)
    print("alpha", alpha)
    print("beta", beta)
    print("r-squared", r_squared(alpha, beta, num_friends_good, daily_minutes_good))

    print()

    # Same fit via stochastic gradient descent on the squared-error loss;
    # should converge to roughly the same alpha and beta.
    print("gradient descent:")
    # choose random value to start
    random.seed(0)
    theta = [random.random(), random.random()]
    alpha, beta = minimize_stochastic(squared_error,
                                      squared_error_gradient,
                                      num_friends_good,
                                      daily_minutes_good,
                                      theta,
                                      0.0001)

    print("alpha", alpha)
    print("beta", beta)
| unlicense |
ran5515/DeepDecision | tensorflow/contrib/distributions/python/ops/bijectors/sigmoid_centered.py | 85 | 1166 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SigmoidCentered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_centered_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Restrict this shim module's public API to the SigmoidCentered bijector;
# everything else pulled in by the wildcard import above is stripped.
_allowed_symbols = ["SigmoidCentered"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/pilkit/utils.py | 2 | 12321 | import os
import mimetypes
import sys
from io import UnsupportedOperation
from .exceptions import UnknownExtension, UnknownFormat
from .lib import Image, ImageFile, StringIO, string_types
# Formats that can store transparency as a full alpha channel (RGBA).
RGBA_TRANSPARENCY_FORMATS = ['PNG']
# Formats that can store transparency via a transparent palette index.
PALETTE_TRANSPARENCY_FORMATS = ['PNG', 'GIF']
# Preferred extension when a format maps to several in PIL's registry.
DEFAULT_EXTENSIONS = {
    'JPEG': '.jpg',
}
def img_to_fobj(img, format, autoconvert=True, **options):
    """Serialize ``img`` into an in-memory file object in ``format``.

    Extra keyword arguments are passed through to ``save_image`` as PIL
    save options.
    """
    return save_image(img, StringIO(), format, options, autoconvert)
def open_image(target):
    """Open ``target`` (a file-like object) as a PIL Image."""
    # Rewind first: the pointer may be at EOF after a previous read/write.
    target.seek(0)
    return Image.open(target)
# Tracks how much of PIL's plugin machinery has been loaded:
# 0 = nothing, 1 = preinit() (standard drivers), 2 = init() (all drivers).
_pil_init = 0

def _preinit_pil():
    """Loads the standard PIL file format drivers. Returns True if ``preinit()``
    was called (and there's a potential that more drivers were loaded) or False
    if there is no possibility that new drivers were loaded.
    """
    global _pil_init
    if _pil_init < 1:
        Image.preinit()
        _pil_init = 1
        return True
    return False
def _init_pil():
    """Loads all PIL file format drivers. Returns True if ``init()`` was called
    (and there's a potential that more drivers were loaded) or False if there is
    no possibility that new drivers were loaded.
    """
    global _pil_init
    # init() implies preinit(), so make sure the counter reflects at least
    # the preinit stage before the full load.
    _preinit_pil()
    if _pil_init < 2:
        Image.init()
        _pil_init = 2
        return True
    return False
def _extension_to_format(extension):
    # Case-insensitive lookup in PIL's extension->format registry; returns
    # None when the extension isn't registered (yet).
    return Image.EXTENSION.get(extension.lower())
def _format_to_extension(format):
    """Reverse-map a PIL format name to an extension, or None.

    Prefers the entry in DEFAULT_EXTENSIONS (when PIL recognizes it),
    otherwise falls back to the first matching extension in PIL's registry.
    """
    if not format:
        return None
    fmt = format.upper()
    preferred = DEFAULT_EXTENSIONS.get(fmt)
    # It's not enough for an extension to be listed in DEFAULT_EXTENSIONS;
    # it must also be recognized by PIL.
    if preferred is not None and preferred in Image.EXTENSION:
        return preferred
    for candidate_ext, registered_fmt in Image.EXTENSION.items():
        if registered_fmt == fmt:
            return candidate_ext
    return None
def extension_to_mimetype(ext):
    """Guess the mimetype for a file extension (e.g. '.png'), or None."""
    # guess_type needs a full filename, so fabricate one from the extension.
    filename = 'a%s' % (ext or '')
    try:
        mimetype = mimetypes.guess_type(filename)[0]
    except IndexError:
        # Kept from the original for safety; guess_type returns a 2-tuple,
        # so in practice this never fires.
        mimetype = None
    return mimetype
def format_to_mimetype(format):
    """Guess the mimetype for a PIL format name via its first extension."""
    return extension_to_mimetype(format_to_extension(format))
def extension_to_format(extension):
    """Return the PIL format that corresponds to *extension*.

    Retries the lookup after lazily loading the standard, then all, PIL
    drivers. Raises ``UnknownExtension`` when no driver matches.
    """
    for loader in (None, _preinit_pil, _init_pil):
        if loader is not None and not loader():
            # No new drivers were loaded, so retrying cannot help.
            continue
        format = _extension_to_format(extension)
        if format:
            return format
    raise UnknownExtension(extension)
def format_to_extension(format):
    """Return the first extension that matches the PIL *format* name.

    Retries the lookup after lazily loading the standard, then all, PIL
    drivers. Raises ``UnknownFormat`` when nothing matches (or *format*
    is falsy).
    """
    if format:
        for loader in (None, _preinit_pil, _init_pil):
            if loader is not None and not loader():
                # No new drivers were loaded, so retrying cannot help.
                continue
            extension = _format_to_extension(format)
            if extension:
                return extension
    raise UnknownFormat(format)
def suggest_extension(name, format):
    """Pick a file extension for *name* when it is saved as *format*.

    The file's original extension is kept whenever it is compatible with the
    target format; otherwise the format's canonical extension is suggested.
    """
    original_extension = os.path.splitext(name)[1]
    try:
        suggested_extension = format_to_extension(format)
    except UnknownFormat:
        # Unknown target format: keep whatever the file already had.
        return original_extension
    if suggested_extension.lower() == original_extension.lower():
        return original_extension
    try:
        original_format = extension_to_format(original_extension)
    except UnknownExtension:
        # The original extension means nothing to PIL; use the suggestion.
        return suggested_extension
    # If the two extensions denote the same format, give precedence to the
    # original extension.
    if format.lower() == original_format.lower():
        return original_extension
    return suggested_extension
class FileWrapper(object):
    """Transparent proxy around a file object.

    Its sole purpose is to make ``fileno()`` raise ``AttributeError``
    instead of ``io.UnsupportedOperation``: some PIL versions only catch
    the former when probing whether a save target is a real OS-level file.
    All other attribute access is forwarded to the wrapped object.
    """

    def __init__(self, wrapped):
        # Bypass our own __setattr__ so the attribute lands on the wrapper
        # itself instead of being forwarded to the wrapped file.
        super(FileWrapper, self).__setattr__('_wrapped', wrapped)

    def fileno(self):
        # Translate the modern io exception into the AttributeError that
        # older PIL code expects.
        try:
            return self._wrapped.fileno()
        except UnsupportedOperation:
            raise AttributeError

    def __getattr__(self, name):
        return getattr(self._wrapped, name)

    def __setattr__(self, name, value):
        return setattr(self._wrapped, name, value)

    def __delattr__(self, key):
        return delattr(self._wrapped, key)
def save_image(img, outfile, format, options=None, autoconvert=True):
    """
    Wraps PIL's ``Image.save()`` method. There are two main benefits of using
    this function over PIL's:

    1. It gracefully handles the infamous "Suspension not allowed here" errors.
    2. It prepares the image for saving using ``prepare_image()``, which will
       do some common-sense processing given the target format.

    :param img: The PIL image to save.
    :param outfile: A file-like object (or a string path) to save into; its
        pointer is reset to 0 before and after saving when it supports seek.
    :param format: The target PIL format name (e.g. ``'JPEG'``).
    :param options: Extra keyword options forwarded to ``Image.save()``;
        they override whatever ``prepare_image()`` suggests.
    :param autoconvert: When True, run ``prepare_image()`` first.
    :returns: ``outfile``.
    """
    options = options or {}
    if autoconvert:
        img, save_kwargs = prepare_image(img, format)
        # Use the arguments returned from prepare_image as the base and
        # update them with the provided options; then use the result.
        save_kwargs.update(options)
        options = save_kwargs
    # Attempt to reset the file pointer.
    try:
        outfile.seek(0)
    except AttributeError:
        pass
    def save(fp):
        with quiet():
            img.save(fp, format, **options)
    # Some versions of PIL only catch AttributeErrors where they should also
    # catch UnsupportedOperation exceptions. To work around this, we wrap the
    # file with an object that will raise the type of error it wants.
    if any(isinstance(outfile, t) for t in string_types):
        # ...but don't wrap strings.
        wrapper = outfile
    else:
        wrapper = FileWrapper(outfile)
    try:
        save(wrapper)
    except IOError:
        # PIL can have problems saving large JPEGs if MAXBLOCK isn't big enough,
        # So if we have a problem saving, we temporarily increase it. See
        # http://github.com/matthewwithanm/django-imagekit/issues/50
        # https://github.com/matthewwithanm/django-imagekit/issues/134
        # https://github.com/python-imaging/Pillow/issues/148
        # https://github.com/matthewwithanm/pilkit/commit/0f914e8b40e3d30f28e04ffb759b262aa8a1a082#commitcomment-3885362
        # MAXBLOCK must be at least as big as...
        new_maxblock = max(
            (len(options['exif']) if 'exif' in options else 0) + 5,  # ...the entire exif header block
            img.size[0] * 4,  # ...a complete scan line
            3 * img.size[0] * img.size[1],  # ...3 bytes per every pixel in the image
        )
        if new_maxblock < ImageFile.MAXBLOCK:
            # A bigger buffer can't be the problem; re-raise the original error.
            raise
        old_maxblock = ImageFile.MAXBLOCK
        ImageFile.MAXBLOCK = new_maxblock
        try:
            save(wrapper)
        finally:
            # Always restore the module-level setting, even on failure.
            ImageFile.MAXBLOCK = old_maxblock
    try:
        outfile.seek(0)
    except AttributeError:
        pass
    return outfile
class quiet(object):
    """
    A context manager for suppressing the stderr activity of PIL's C libraries.
    Based on http://stackoverflow.com/a/978264/155370

    Works at the file-descriptor level: the process's stderr fd is dup2()-ed
    onto os.devnull for the duration of the ``with`` block, then restored.
    """

    def __enter__(self):
        try:
            self.stderr_fd = sys.__stderr__.fileno()
        except AttributeError:
            # In some environments (e.g. Azure) stderr has no underlying
            # file descriptor, so there is nothing to silence.
            return
        try:
            self.null_fd = os.open(os.devnull, os.O_RDWR)
        except OSError:
            # If dev/null isn't writeable, then they just have to put up with
            # the noise.
            return
        # Keep a duplicate of the real stderr so it can be restored later.
        self.old = os.dup(self.stderr_fd)
        os.dup2(self.null_fd, self.stderr_fd)

    def __exit__(self, *args, **kwargs):
        # NOTE(review): these truthiness checks would also trip if a saved
        # fd happened to be 0; in practice stderr-related fds are never 0.
        if not getattr(self, 'null_fd', None):
            return
        if not getattr(self, 'old', None):
            return
        os.dup2(self.old, self.stderr_fd)
        os.close(self.null_fd)
        os.close(self.old)
def prepare_image(img, format):
    """
    Prepares the image for saving to the provided format by doing some
    common-sense conversions. This includes things like preserving transparency
    and quantizing. This function is used automatically by ``save_image()``
    immediately before saving unless you specify ``autoconvert=False``. It is
    provided as a utility for those doing their own processing.

    :param img: The image to prepare for saving.
    :param format: The format that the image will be saved to.
    :returns: A ``(converted_image, save_kwargs)`` tuple; the kwargs are
        meant to be passed on to ``Image.save()``.
    """
    make_opaque = False
    save_kwargs = {}
    format = format.upper()
    if img.mode == 'RGBA':
        if format in RGBA_TRANSPARENCY_FORMATS:
            # Target supports alpha natively; nothing to do.
            pass
        elif format in PALETTE_TRANSPARENCY_FORMATS:
            # If you're going from a format with alpha transparency to one
            # with palette transparency, transparency values will be
            # snapped: pixels that are more opaque than not will become
            # fully opaque; pixels that are more transparent than not will
            # become fully transparent. This will not produce a good-looking
            # result if your image contains varying levels of opacity; in
            # that case, you'll probably want to use a processor to composite
            # the image on a solid color. The reason we don't do this by
            # default is because not doing so allows processors to treat
            # RGBA-format images as a super-type of P-format images: if you
            # have an RGBA-format image with only a single transparent
            # color, and save it as a GIF, it will retain its transparency.
            # In other words, a P-format image converted to an
            # RGBA-formatted image by a processor and then saved as a
            # P-format image will give the expected results.

            # Work around a bug in PIL: split() doesn't check to see if
            # img is loaded.
            img.load()
            alpha = img.split()[-1]
            # Snap alpha to fully-transparent / fully-opaque at the midpoint.
            mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
            # Reserve palette index 255 for the transparent color.
            img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE,
                                             colors=255)
            img.paste(255, mask)
            save_kwargs['transparency'] = 255
        else:
            # Simply converting an RGBA-format image to an RGB one creates a
            # gross result, so we paste the image onto a white background. If
            # that's not what you want, that's fine: use a processor to deal
            # with the transparency however you want. This is simply a
            # sensible default that will always produce something that looks
            # good. Or at least, it will look better than just a straight
            # conversion.
            make_opaque = True
    elif img.mode == 'P':
        if format in PALETTE_TRANSPARENCY_FORMATS:
            # Carry the palette's transparency index through, if any.
            try:
                save_kwargs['transparency'] = img.info['transparency']
            except KeyError:
                pass
        elif format in RGBA_TRANSPARENCY_FORMATS:
            # Currently PIL doesn't support any RGBA-mode formats that
            # aren't also P-mode formats, so this will never happen.
            img = img.convert('RGBA')
        else:
            make_opaque = True
    else:
        img = img.convert('RGB')

        # GIFs are always going to be in palette mode, so we can do a little
        # optimization. Note that the RGBA sources also use adaptive
        # quantization (above). Images that are already in P mode don't need
        # any quantization because their colors are already limited.
        if format == 'GIF':
            img = img.convert('P', palette=Image.ADAPTIVE)
    if make_opaque:
        from .processors import MakeOpaque
        img = MakeOpaque().process(img).convert('RGB')
    if format == 'JPEG':
        save_kwargs['optimize'] = True
    return img, save_kwargs
def process_image(img, processors=None, format=None, autoconvert=True, options=None):
    """Run *img* through *processors* and return it saved to a file object."""
    from .processors import ProcessorPipeline

    original_format = img.format
    # Run the processors.
    pipeline = ProcessorPipeline(processors or [])
    img = pipeline.process(img)
    # Fall back through: explicit format, the processed image's format, the
    # source image's format, and finally JPEG.
    target_format = format or img.format or original_format or 'JPEG'
    return img_to_fobj(img, target_format, autoconvert, **(options or {}))
| apache-2.0 |
arnuschky/bitcoin | qa/rpc-tests/test_framework/util.py | 33 | 13438 | # Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """Return the P2P listen port for node *n*.

    A per-process offset (pid mod 999) keeps concurrently running test jobs
    from colliding on the same ports.
    """
    offset = os.getpid() % 999
    return 11000 + n + offset
def rpc_port(n):
    """Return the RPC listen port for node *n*.

    Uses the same pid-based offset as ``p2p_port`` so parallel test jobs do
    not collide.
    """
    offset = os.getpid() % 999
    return 12000 + n + offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    # Round-trip a max-supply-sized amount through float/json and compare
    # at satoshi precision.
    roundtripped = json.loads(json.dumps(float(n)))
    satoshis = int(roundtripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    while True:
        counts = [conn.getblockcount() for conn in rpc_connections]
        # All counts equal <=> every element equals the first one.
        if counts.count(counts[0]) == len(counts):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        # Compare every other node's pool against the first node's.
        if all(set(conn.getrawmempool()) == reference
               for conn in rpc_connections[1:]):
            break
        time.sleep(wait)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* and write its bitcoin.conf.

    Returns the datadir path. Fix: the original opened the config file
    without ever closing it; a ``with`` block now guarantees the handle is
    flushed and closed.
    """
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """
    # Build the cache only when any of the four cached datadirs is missing.
    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):

        # find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))

        devnull = open(os.devnull, "w")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Nodes 1-3 connect to node 0 so all four share one network.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # hides the real error; narrowing it would be safer.
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        # Remove per-run state so cached datadirs stay reusable.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    # Copy the cached datadirs into the test directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.

    Fix: the return value of ``initialize_datadir`` was assigned to an
    unused local; the assignment is dropped.
    """
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it

    :param i: node index (selects datadir ``node<i>`` and the port pair).
    :param dirname: directory containing the per-node datadirs.
    :param extra_args: extra command-line arguments for bitcoind.
    :param rpchost: optional "IP[:port]" the RPC interface listens on.
    :param timewait: optional RPC timeout (seconds) for the returned proxy.
    :param binary: path of the bitcoind binary; defaults to $BITCOIND.
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    # Track the process globally so stop_node()/wait_bitcoinds() can find it.
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open(os.devnull, "w")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
    # -rpcwait blocks until the daemon's RPC interface answers.
    subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    # Normalize the per-node option lists.
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i* via RPC, wait for its process, and forget it."""
    node.stop()
    # Block until the daemon has actually exited, then drop it from the
    # global process table so wait_bitcoinds() won't see it again.
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Ask every node to stop via RPC, then drop all connections."""
    for node in list(nodes):
        node.stop()
    # Emptying the caller's list in place closes the connections as a side
    # effect.
    nodes[:] = []
def set_node_times(nodes, t):
    """Set the same mock time *t* (unix timestamp) on every node."""
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    # All processes have exited; reset the global table.
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect *from_connection* outbound to node *node_num*."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* to each other in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, output in enumerate(txdata["vout"]):
        if output["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    # Shuffle so repeated calls don't always spend the same coins.
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and utxo:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"],
                       "vout": txout["vout"],
                       "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs.
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'),
                                            rounding=ROUND_DOWN)
        outputs[change_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Need enough to cover the amount plus the fee of BOTH transactions.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Sender and receiver are chosen independently and may coincide.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Randomize the fee in fee_increment steps above min_fee.
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Sender and receiver are chosen independently and may coincide.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Randomize the fee in fee_increment steps above min_fee.
    fee = min_fee + fee_increment*random.randint(0,fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that ``fun(*args, **kwds)`` raises *exc*.

    Any other exception, or no exception at all, fails the assertion.
    """
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    # fun() returned without raising anything.
    raise AssertionError("No exception raised")
| mit |
cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py | 17 | 6150 | # Copyright 2014 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nec: delete old ofc mapping tables
Revision ID: 117643811bca
Revises: 81c553f3776c
Create Date: 2014-03-02 05:26:47.073318
"""
# revision identifiers, used by Alembic.
revision = '117643811bca'
down_revision = '81c553f3776c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext import compiler as sa_compiler
from sqlalchemy.sql import expression as sa_expr
from neutron.db import migration
# sqlalchemy does not support the expression:
# INSERT INTO <table> (<column>, ...) (SELECT ...)
# The following class is to support this expression.
# Reference: http://docs.sqlalchemy.org/en/rel_0_9/core/compiler.html
# section: "Compiling sub-elements of a custom expression construct"
class InsertFromSelect(sa_expr.Executable, sa_expr.ClauseElement):
    """Custom SQLAlchemy clause for ``INSERT INTO ... (SELECT ...)``.

    ``insert_spec`` is either a table or a list of columns of one table;
    ``select`` is the SELECT supplying the rows. Compiled by the
    ``visit_insert_from_select`` compiler hook below.
    """

    # Autocommit so each generated INSERT is executed immediately.
    _execution_options = (sa_expr.Executable._execution_options.
                          union({'autocommit': True}))

    def __init__(self, insert_spec, select):
        self.insert_spec = insert_spec
        self.select = select
@sa_compiler.compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Compile an ``InsertFromSelect`` element to its SQL string."""
    if type(element.insert_spec) == list:
        # A list of columns: name them explicitly, taking the target table
        # from the first column.
        columns = []
        for column in element.insert_spec:
            columns.append(column.name)

        table = compiler.process(element.insert_spec[0].table, asfrom=True)
        columns = ", ".join(columns)
        sql = ("INSERT INTO %s (%s) (%s)" %
               (table, columns, compiler.process(element.select)))
    else:
        # A bare table: insert into all of its columns positionally.
        sql = ("INSERT INTO %s (%s)" %
               (compiler.process(element.insert_spec, asfrom=True),
                compiler.process(element.select)))
    return sql
def upgrade():
    """Copy rows from the old NEC ofc* tables into the *mappings tables.

    Each old table's primary key becomes an OFC resource path
    (e.g. ``/tenants/<id>``) in the corresponding mapping table; the old
    tables are dropped afterwards. No-op when the plugin never created
    the ofc tables.
    """
    # Table definitions below are only used for sqlalchemy to generate
    # SQL statements, so in networks/ports tables only required field
    # are declared. Note that 'quantum_id' in OFC ID mapping tables
    # will be renamed in a later patch (bug 1287432).

    if not migration.schema_has_table('ofctenants'):
        # Assume that, in the database we are migrating from, the
        # configured plugin did not create any ofc tables.
        return

    ofctenants = sa_expr.table(
        'ofctenants',
        sa_expr.column('id'),
        sa_expr.column('quantum_id'))
    ofcnetworks = sa_expr.table(
        'ofcnetworks',
        sa_expr.column('id'),
        sa_expr.column('quantum_id'))
    ofcports = sa_expr.table(
        'ofcports',
        sa_expr.column('id'),
        sa_expr.column('quantum_id'))
    ofcfilters = sa_expr.table(
        'ofcfilters',
        sa_expr.column('id'),
        sa_expr.column('quantum_id'))

    ofctenantmappings = sa_expr.table(
        'ofctenantmappings',
        sa_expr.column('ofc_id'),
        sa_expr.column('quantum_id'))
    ofcnetworkmappings = sa_expr.table(
        'ofcnetworkmappings',
        sa_expr.column('ofc_id'),
        sa_expr.column('quantum_id'))
    ofcportmappings = sa_expr.table(
        'ofcportmappings',
        sa_expr.column('ofc_id'),
        sa_expr.column('quantum_id'))
    ofcfiltermappings = sa_expr.table(
        'ofcfiltermappings',
        sa_expr.column('ofc_id'),
        sa_expr.column('quantum_id'))

    networks = sa_expr.table(
        'networks',
        sa_expr.column('id'),
        sa_expr.column('tenant_id'))
    ports = sa_expr.table(
        'ports',
        sa_expr.column('id'),
        sa_expr.column('network_id'))

    # ofctenants -> ofctenantmappings
    select_obj = sa.select([ofctenants.c.quantum_id,
                            op.inline_literal('/tenants/') + ofctenants.c.id])
    stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
                             ofctenantmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcnetworks -> ofcnetworkmappings
    # Join through networks to the tenant mapping so the new ofc_id can be
    # built as <tenant ofc_id>/networks/<old id>.
    select_obj = ofcnetworks.join(
        networks,
        ofcnetworks.c.quantum_id == networks.c.id)
    select_obj = select_obj.join(
        ofctenantmappings,
        ofctenantmappings.c.quantum_id == networks.c.tenant_id)
    select_obj = sa.select(
        [ofcnetworks.c.quantum_id,
         (ofctenantmappings.c.ofc_id +
          op.inline_literal('/networks/') + ofcnetworks.c.id)],
        from_obj=select_obj)
    stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
                             ofcnetworkmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcports -> ofcportmappings
    # Same pattern one level deeper: <network ofc_id>/ports/<old id>.
    select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
    select_obj = select_obj.join(
        ofcnetworkmappings,
        ofcnetworkmappings.c.quantum_id == ports.c.network_id)
    select_obj = sa.select(
        [ofcports.c.quantum_id,
         (ofcnetworkmappings.c.ofc_id +
          op.inline_literal('/ports/') + ofcports.c.id)],
        from_obj=select_obj)
    stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
                             ofcportmappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # ofcfilters -> ofcfiltermappings
    select_obj = sa.select([ofcfilters.c.quantum_id,
                            op.inline_literal('/filters/') + ofcfilters.c.id])
    stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
                             ofcfiltermappings.c.ofc_id],
                            select_obj)
    op.execute(stmt)

    # drop old mapping tables
    op.drop_table('ofctenants')
    op.drop_table('ofcnetworks')
    op.drop_table('ofcports')
    op.drop_table('ofcfilters')
def downgrade():
    """No-op: the migration is irreversible (old ofc* tables are dropped)."""
    pass
| apache-2.0 |
cdht/androguard | demos/dad_emul.py | 61 | 5277 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('./')
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis.analysis import uVMAnalysis
from androguard.decompiler.dad.decompile import DvMethod
from androguard.decompiler.dad.instruction import (Constant,
BinaryCompExpression)
class DemoEmulator(object):
    """Tiny interpreter for DAD's decompiled IR.

    Walks the control-flow graph node by node and the instruction trees
    via their ``visit()`` double-dispatch, keeping all program state in
    ``self.mem`` (a name/variable -> value dict seeded through ``init()``).
    """

    def __init__(self, graph):
        self.graph = graph
        # Stack of loop nodes currently being executed, so visit() does not
        # re-enter a loop header from inside its own body.
        self.loop = []
        # Emulated memory: variables, arrays and static fields by key.
        self.mem = {}

    def init(self, key, value):
        # Pre-seed emulated memory (method parameters, statics, ...).
        self.mem[key] = value

    def visit(self, node):
        if node not in self.loop:
            node.visit(self)

    def visit_ins(self, ins):
        return ins.visit(self)

    def visit_loop_node(self, loop):
        self.loop.append(loop)
        follow = loop.get_loop_follow()
        if loop.looptype.pretest():
            # Normalize a pre-test loop so the 'true' branch is the body.
            if loop.true is follow:
                loop.neg()
                loop.true, loop.false = loop.false, loop.true
        # Actually execute the loop until its condition turns false.
        while loop.visit_cond(self):
            loop.true.visit(self)
        self.loop.pop()
        if follow is not None:
            self.visit(follow)

    def visit_cond_node(self, cond):
        follow = cond.get_if_follow()
        if follow is not None:
            # There is an 'else' only when the follow node is neither branch.
            has_else = not (follow in (cond.true, cond.false))
            cnd = cond.visit_cond(self)
            if cnd:
                cond.true.visit(self)
            elif has_else:
                cond.false.visit(self)
            self.visit(follow)

    def visit_statement_node(self, stmt):
        sucs = self.graph.sucs(stmt)
        for ins in stmt.get_ins():
            self.visit_ins(ins)
        if len(sucs):
            self.visit(sucs[0])

    def visit_return_node(self, ret):
        for ins in ret.get_ins():
            self.visit_ins(ins)

    def visit_constant(self, cst):
        return cst

    def visit_variable(self, var):
        return self.mem[var]

    def visit_param(self, param):
        return param

    def visit_assign(self, lhs, rhs):
        if lhs is None:
            # Expression statement: evaluate for side effects only.
            rhs.visit(self)
        else:
            self.mem[lhs.v] = rhs.visit(self)

    def visit_astore(self, array, index, rhs):
        # array[index] = rhs
        array = array.visit(self)
        if isinstance(index, Constant):
            idx = index.visit(self, 'I')
        else:
            idx = index.visit(self)
        self.mem[array][idx] = rhs.visit(self)

    def visit_return_void(self):
        pass

    def visit_aload(self, array, index):
        # Read array[index] from emulated memory.
        arr = array.visit(self)
        idx = index.visit(self)
        return self.mem[arr][idx]

    def visit_alength(self, array):
        return len(self.mem[array.visit(self)])

    def visit_binary_expression(self, op, arg1, arg2):
        # Coerce one-char strings to their ordinal so mixed int/char
        # arithmetic works, then evaluate "<arg1> <op> <arg2>".
        arg1 = arg1.visit(self)
        if not isinstance(arg1, int):
            arg1 = ord(arg1)
        arg2 = arg2.visit(self)
        if not isinstance(arg2, int):
            arg2 = ord(arg2)
        return eval('%s %s %s' % (arg1, op, arg2))

    def visit_unary_expression(self, op, arg):
        # NOTE(review): result is discarded (no return) -- intentional for
        # this demo, but differs from the other expression visitors.
        arg.visit(self)

    def visit_cast(self, op, arg):
        return arg.visit(self)

    def visit_cond_expression(self, op, arg1, arg2):
        # Same coercion rules as visit_binary_expression.
        arg1 = arg1.visit(self)
        if not isinstance(arg1, int):
            arg1 = ord(arg1)
        arg2 = arg2.visit(self)
        if not isinstance(arg2, int):
            arg2 = ord(arg2)
        return eval('%s %s %s' % (arg1, op, arg2))

    def visit_get_static(self, cls, name):
        return self.mem[name]
# --- Demo driver: emulate the 'crypt' method of the magicspiral sample ---
TEST = './apks/pacsec/magicspiral.apk'

# Load the APK's dex, analyze it and decompile the target method into
# DAD's IR / control-flow graph.
vm = dvm.DalvikVMFormat(apk.APK(TEST).get_dex())
vma = uVMAnalysis(vm)

method = vm.get_method('crypt')[0]
amethod = vma.get_method(method)

dvmethod = DvMethod(amethod)
dvmethod.process() # build IR Form / control flow...

graph = dvmethod.graph

visitor = DemoEmulator(graph)

# Ciphertext bytes passed as the method's first parameter.
l = [94, 42, 93, 88, 3, 2, 95, 2, 13, 85, 11, 2, 19, 1, 125, 19, 0, 102,
     30, 24, 19, 99, 76, 21, 102, 22, 26, 111, 39, 125, 2, 44, 80, 10, 90,
     5, 119, 100, 119, 60, 4, 87, 79, 42, 52]
visitor.init(dvmethod.lparams[0], l)

# Seed the static key field (and its byte-array indirection) plus keylen.
KEYVALUE = '6^)(9-p35a%3#4S!4S0)$Yt%^&5(j.g^&o(*0)$Yv!#O@6GpG@=+3j.&6^)(0-=1'
visitor.init('KEYVALUE', '[BKEYVALUE')
visitor.init('[BKEYVALUE', KEYVALUE)
visitor.init('keylen', len(KEYVALUE))

method.show()

def show_mem(visitor):
    # Dump register 4 (the work buffer) raw and decoded to characters.
    print 'Memory[4]: %s' % visitor.mem[4]
    print '==> %r' % ''.join(chr(i) for i in visitor.mem[4])

show_mem(visitor)
print '\nStarting visit...',
graph.get_entry().visit(visitor)
print ' done !\n'
show_mem(visitor)
| apache-2.0 |
cfg2015/EPT-2015-2 | openerp/modules/__init__.py | 352 | 1516 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
from . import db, graph, loading, migration, module, registry
from openerp.modules.loading import load_modules
from openerp.modules.module import get_modules, get_modules_with_version, \
load_information_from_description_file, get_module_resource, get_module_path, \
initialize_sys_path, load_openerp_module, init_module_models, adapt_version
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
debugger22/sympy | sympy/plotting/pygletplot/plot_mode_base.py | 96 | 11337 | from __future__ import print_function, division
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core import S
from sympy.core.compatibility import is_sequence
from time import sleep
import warnings
class PlotModeBase(PlotMode):
    """
    Intended parent class for plotting
    modes. Provides base functionality
    in conjunction with its parent,
    PlotMode.
    """

    ##
    ## Class-Level Attributes
    ##

    """
    The following attributes are meant
    to be set at the class level, and serve
    as parameters to the plot mode registry
    (in PlotMode). See plot_modes.py for
    concrete examples.
    """

    """
    i_vars
        'x' for Cartesian2D
        'xy' for Cartesian3D
        etc.
    d_vars
        'y' for Cartesian2D
        'r' for Polar
        etc.
    """
    i_vars, d_vars = '', ''

    """
    intervals
        Default intervals for each i_var, and in the
        same order. Specified [min, max, steps].
        No variable can be given (it is bound later).
    """
    intervals = []

    """
    aliases
        A list of strings which can be used to
        access this mode.
        'cartesian' for Cartesian2D and Cartesian3D
        'polar' for Polar
        'cylindrical', 'polar' for Cylindrical
        Note that _init_mode chooses the first alias
        in the list as the mode's primary_alias, which
        will be displayed to the end user in certain
        contexts.
    """
    aliases = []

    """
    is_default
        Whether to set this mode as the default
        for arguments passed to PlotMode() containing
        the same number of d_vars as this mode and
        at most the same number of i_vars.
    """
    is_default = False

    """
    All of the above attributes are defined in PlotMode.
    The following ones are specific to PlotModeBase.
    """

    """
    A list of the render styles. Do not modify.
    """
    # Bitmask values: bit 0 selects the wireframe pass, bit 1 the solid
    # pass; 'both' (3) enables the two passes at once (see draw()).
    styles = {'wireframe': 1, 'solid': 2, 'both': 3}

    """
    style_override
        Always use this style if not blank.
    """
    style_override = ''

    """
    default_wireframe_color
    default_solid_color
        Can be used when color is None or being calculated.
        Used by PlotCurve and PlotSurface, but not anywhere
        in PlotModeBase.
    """
    default_wireframe_color = (0.85, 0.85, 0.85)
    default_solid_color = (0.6, 0.6, 0.9)
    default_rot_preset = 'xy'

    ##
    ## Instance-Level Attributes
    ##

    ## 'Abstract' member functions
    def _get_evaluator(self):
        # Prefer the (fast) lambdified evaluator; fall back on the
        # slower sympy subs-based evaluator if lambdification fails.
        if self.use_lambda_eval:
            try:
                e = self._get_lambda_evaluator()
                return e
            except Exception:
                warnings.warn("\nWarning: creating lambda evaluator failed. "
                              "Falling back on sympy subs evaluator.")
        return self._get_sympy_evaluator()

    def _get_sympy_evaluator(self):
        raise NotImplementedError()

    def _get_lambda_evaluator(self):
        raise NotImplementedError()

    def _on_calculate_verts(self):
        raise NotImplementedError()

    def _on_calculate_cverts(self):
        raise NotImplementedError()

    ## Base member functions
    def __init__(self, *args, **kwargs):
        self.verts = []
        self.cverts = []
        # Per-axis [min, max, ...] triples, initialised to an "empty"
        # range ([+inf, -inf, 0]) so any real point will widen them.
        self.bounds = [[S.Infinity, -S.Infinity, 0],
                       [S.Infinity, -S.Infinity, 0],
                       [S.Infinity, -S.Infinity, 0]]
        self.cbounds = [[S.Infinity, -S.Infinity, 0],
                        [S.Infinity, -S.Infinity, 0],
                        [S.Infinity, -S.Infinity, 0]]
        # Serializes drawing and render-stack mutation across threads
        # (see the `synchronized` decorator below).
        self._draw_lock = RLock()
        # Set while the background vertex / color-vertex calculation runs.
        self._calculating_verts = Event()
        self._calculating_cverts = Event()
        self._calculating_verts_pos = 0.0
        self._calculating_verts_len = 0.0
        self._calculating_cverts_pos = 0.0
        self._calculating_cverts_len = 0.0
        self._max_render_stack_size = 3
        # Render stacks start with a -1 marker meaning "nothing to draw".
        self._draw_wireframe = [-1]
        self._draw_solid = [-1]
        self._style = None
        self._color = None
        self.predraw = []
        self.postdraw = []
        self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
        self.style = self.options.pop('style', '')
        self.color = self.options.pop('color', 'rainbow')
        self.bounds_callback = kwargs.pop('bounds_callback', None)
        self._on_calculate()

    def synchronized(f):
        # Method decorator: run f while holding self._draw_lock.
        def w(self, *args, **kwargs):
            self._draw_lock.acquire()
            try:
                r = f(self, *args, **kwargs)
                return r
            finally:
                self._draw_lock.release()
        return w

    @synchronized
    def push_wireframe(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_wireframe.append(function)
        if len(self._draw_wireframe) > self._max_render_stack_size:
            del self._draw_wireframe[1]  # leave marker element

    @synchronized
    def push_solid(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_solid.append(function)
        if len(self._draw_solid) > self._max_render_stack_size:
            del self._draw_solid[1]  # leave marker element

    def _create_display_list(self, function):
        # Compile the GL commands issued by `function` into a new
        # display list and return its id.
        dl = glGenLists(1)
        glNewList(dl, GL_COMPILE)
        function()
        glEndList()
        return dl

    def _render_stack_top(self, render_stack):
        # A stack entry is either -1 (empty marker), a callable that has
        # not been compiled yet, or a (display_list_id, callable) pair.
        top = render_stack[-1]
        if top == -1:
            return -1  # nothing to display
        elif callable(top):
            dl = self._create_display_list(top)
            render_stack[-1] = (dl, top)
            return dl  # display newly added list
        elif len(top) == 2:
            if GL_TRUE == glIsList(top[0]):
                return top[0]  # display stored list
            dl = self._create_display_list(top[1])
            render_stack[-1] = (dl, top[1])
            return dl  # display regenerated list

    def _draw_solid_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glCallList(dl)
        glPopAttrib()

    def _draw_wireframe_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        # Offset the lines so they do not z-fight with the solid pass.
        glEnable(GL_POLYGON_OFFSET_LINE)
        glPolygonOffset(-0.005, -50.0)
        glCallList(dl)
        glPopAttrib()

    @synchronized
    def draw(self):
        for f in self.predraw:
            if callable(f):
                f()
        if self.style_override:
            style = self.styles[self.style_override]
        else:
            style = self.styles[self._style]
        # Draw solid component if style includes solid
        if style & 2:
            dl = self._render_stack_top(self._draw_solid)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_solid_display_list(dl)
        # Draw wireframe component if style includes wireframe
        if style & 1:
            dl = self._render_stack_top(self._draw_wireframe)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_wireframe_display_list(dl)
        for f in self.postdraw:
            if callable(f):
                f()

    def _on_change_color(self, color):
        # Recompute color vertices on a background thread.
        Thread(target=self._calculate_cverts).start()

    def _on_calculate(self):
        Thread(target=self._calculate_all).start()

    def _calculate_all(self):
        self._calculate_verts()
        self._calculate_cverts()

    def _calculate_verts(self):
        # Skip if a vertex calculation is already in progress.
        if self._calculating_verts.isSet():
            return
        self._calculating_verts.set()
        try:
            self._on_calculate_verts()
        finally:
            self._calculating_verts.clear()
        if callable(self.bounds_callback):
            self.bounds_callback()

    def _calculate_cverts(self):
        if self._calculating_verts.isSet():
            return
        while self._calculating_cverts.isSet():
            sleep(0)  # wait for previous calculation
        self._calculating_cverts.set()
        try:
            self._on_calculate_cverts()
        finally:
            self._calculating_cverts.clear()

    def _get_calculating_verts(self):
        return self._calculating_verts.isSet()

    def _get_calculating_verts_pos(self):
        return self._calculating_verts_pos

    def _get_calculating_verts_len(self):
        return self._calculating_verts_len

    def _get_calculating_cverts(self):
        return self._calculating_cverts.isSet()

    def _get_calculating_cverts_pos(self):
        return self._calculating_cverts_pos

    def _get_calculating_cverts_len(self):
        return self._calculating_cverts_len

    ## Property handlers
    def _get_style(self):
        return self._style

    @synchronized
    def _set_style(self, v):
        if v is None:
            return
        if v == '':
            # Auto-select: 'both' for coarse meshes, 'solid' when any
            # interval has more than 40 steps.
            step_max = 0
            for i in self.intervals:
                if i.v_steps is None:
                    continue
                step_max = max([step_max, int(i.v_steps)])
            v = ['both', 'solid'][step_max > 40]
        if v not in self.styles:
            raise ValueError("v should be there in self.styles")
        if v == self._style:
            return
        self._style = v

    def _get_color(self):
        return self._color

    @synchronized
    def _set_color(self, v):
        try:
            if v is not None:
                if is_sequence(v):
                    v = ColorScheme(*v)
                else:
                    v = ColorScheme(v)
            # Avoid a pointless recalculation if the scheme is unchanged.
            if repr(v) == repr(self._color):
                return
            self._on_change_color(v)
            self._color = v
        except Exception as e:
            # NOTE(review): the original exception is flattened into the
            # message here; the traceback chain is not preserved.
            raise RuntimeError(("Color change failed. "
                                "Reason: %s" % (str(e))))

    style = property(_get_style, _set_style)
    color = property(_get_color, _set_color)
    calculating_verts = property(_get_calculating_verts)
    calculating_verts_pos = property(_get_calculating_verts_pos)
    calculating_verts_len = property(_get_calculating_verts_len)
    calculating_cverts = property(_get_calculating_cverts)
    calculating_cverts_pos = property(_get_calculating_cverts_pos)
    calculating_cverts_len = property(_get_calculating_cverts_len)

    ## String representations

    def __str__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        o = "'mode=%s'" % (self.primary_alias)
        return ", ".join([f, o])

    def __repr__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        i = ", ".join(str(i) for i in self.intervals)
        d = [('mode', self.primary_alias),
             ('color', str(self.color)),
             ('style', str(self.style))]
        o = "'%s'" % (("; ".join("%s=%s" % (k, v)
                                 for k, v in d if v != 'None')))
        return ", ".join([f, i, o])
| bsd-3-clause |
radjkarl/dataArtist | dataArtist/figures/image/tools/input/VideoStream.py | 1 | 2582 | # coding=utf-8
from __future__ import division
import cv2
import numpy as np
from pyqtgraph_karl.Qt import QtCore
from imgProcessor.transformations import toFloatArray
from dataArtist.widgets.Tool import Tool
class VideoStream(Tool):
    '''
    Capture the video stream from a connected webcam.
    '''
    icon = 'webcam.svg'

    def __init__(self, display):
        Tool.__init__(self, display)
        # Timer that periodically polls the webcam for a new frame.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self._grabImage)
        self.firstTime = True
        pa = self.setParameterMenu()
        self.pGrayscale = pa.addChild({
            'name': 'Grayscale',
            'type': 'bool',
            'value': True})
        self.pFloat = pa.addChild({
            'name': 'To float',
            'type': 'bool',
            'value': True})
        pFrequency = pa.addChild({
            'name': 'Read frequency [Hz]',
            'type': 'float',
            'value': 20.0,
            'min': 0})
        pFrequency.sigValueChanged.connect(self._setInterval)
        self._setInterval(pFrequency, pFrequency.value())
        self.pBuffer = pa.addChild({
            'name': 'Buffer last n images',
            'type': 'int',
            'value': 0})

    def _setInterval(self, param, freq):
        """Adjust the polling interval from a frequency in Hz.

        BUGFIX: the original ignored *freq*, re-read ``param.value()``
        and raised ZeroDivisionError when the frequency was set to its
        allowed minimum of 0.  A non-positive frequency now leaves the
        interval unchanged instead of crashing.
        """
        if freq > 0:
            # Convert Hz to a period in milliseconds.
            self.timer.setInterval(1000.0 / freq)

    def activate(self):
        """Open the capture device and start polling frames."""
        if self.firstTime:
            self.display.addLayer(filename='VideoStream')
            self.firstTime = False
        self.vc = cv2.VideoCapture(-1)
        self.timer.start()

    def deactivate(self):
        """Stop polling frames (the capture device is kept open)."""
        self.timer.stop()

    def _grabImage(self):
        """Read one frame from the webcam and push it into the display."""
        w = self.display.widget
        rval, img = self.vc.read()
        if not rval:
            # No frame available; try again on the next timer tick.
            return
        # COLOR: OpenCV delivers BGR; convert to grayscale or RGB.
        if self.pGrayscale.value():
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if self.pFloat.value():
            img = toFloatArray(img)
        i = w.image
        b = self.pBuffer.value()
        if b:
            # BUFFER LAST N IMAGES
            if i is None or len(i) < b:
                # Buffer not full yet: stack the frame as a new layer.
                self.display.addLayer(data=img)
            else:
                # TODO: implement as ring buffer using np.roll()
                img = np.insert(i, 0, img, axis=0)
                # Use the buffer size read above so one consistent value
                # is applied within a single grab.
                img = img[:b]
                w.setImage(img, autoRange=False, autoLevels=False)
        else:
            w.setImage(img, autoRange=False, autoLevels=False)
| gpl-3.0 |
TelematicaUSM/EduRT | panels/user/__init__.py | 2 | 20063 | # -*- coding: UTF-8 -*-
# COPYRIGHT (c) 2016 Cristóbal Ganter
#
# GNU AFFERO GENERAL PUBLIC LICENSE
# Version 3, 19 November 2007
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Render the user panel and process all user messages.
This module patches the :class:`controller.MSGHandler`
class, adding the
:meth:`controller.MSGHandler.send_user_not_loaded_error`,
:meth:`controller.MSGHandler.send_room_not_loaded_error` and
:meth:`controller.MSGHandler.logout_and_close` methods.
"""
from functools import partialmethod
import jwt
from tornado.gen import coroutine
from pymongo.errors import OperationFailure
import src
from controller import MSGHandler
from backend_modules import router
from src import messages as msg
from src.db import User, Code, NoObjectReturnedFromDB, \
ConditionNotMetError, Course, Room
from src.pub_sub import MalformedMessageError
from src.wsclass import subscribe
_path = msg.join_path('panels', 'user')
# Patch convenience error senders onto MSGHandler: each is
# ``send_error`` with the error code and description pre-bound.
MSGHandler.send_user_not_loaded_error = partialmethod(
    MSGHandler.send_error,
    'userNotLoaded',
    description='There was no loaded user when '
                'this message arrived.'
)
MSGHandler.send_room_not_loaded_error = partialmethod(
    MSGHandler.send_error,
    'roomNotLoaded',
    description='There was no loaded room when '
                'this message arrived.'
)
def _logout_and_close(self, reason):
    """Notify the client of the logout, then close the connection.

    :param str reason:
        Close reason forwarded to the websocket ``close`` call.
    """
    logout_notice = {'type': 'logout'}
    self.write_message(logout_notice)
    # 1000 is the WebSocket "normal closure" status code.
    self.close(1000, reason)
    self.clean_closed = True
MSGHandler.logout_and_close = _logout_and_close
class UserPanel(src.boiler_ui_module.BoilerUIModule):
    """UI module that renders the user panel."""
    # DOM id and CSS classes applied to the panel container.
    id_ = 'user-panel'
    classes = {'scrolling-panel', 'system'}
    # Human-readable panel title ("User" in Spanish).
    name = 'Usuario'
    # Static-asset configuration for this panel's CSS/JS files.
    conf = {
        'static_url_prefix': '/user/',
        'static_path': './panels/user/static',
        'css_files': ['user.css'],
        'js_files': ['user.js'],
    }

    def render(self):
        """Render the panel's HTML template."""
        return self.render_string(
            '../panels/user/user.html')
class UserWSC(src.wsclass.WSClass):
    """Process user messages"""
    _path = msg.join_path(_path, 'UserWSC')

    # The ``should_run_*`` static predicates below all take the same
    # tuple of session-transition booleans (built in ``startSession``)
    # and decide whether one step of the transition must run.

    @staticmethod
    def should_run_room_deassign_course(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        # A teacher with a course either became a student or moved to a
        # different room: his old room must drop the course.
        return was_teacher and has_course and (
            is_student or is_teacher and distinct_room)

    @staticmethod
    def should_run_user_deassign_course(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        student_condition = \
            was_student and (
                not is_student or distinct_room)
        teacher_condition = \
            was_teacher and not is_none and (
                not is_teacher or distinct_room)
        return has_course and (
            student_condition or
            teacher_condition
        )

    @staticmethod
    def should_run_use_seat(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        # Scanning a seat code always implies occupying that seat.
        return is_student

    @staticmethod
    def should_run_logout_other_instances(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        return \
            was_student or \
            is_student or \
            was_none and not is_none or \
            is_teacher and distinct_room

    @staticmethod
    def should_run_load_course(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        # Keep the same course when the role and the room are unchanged.
        return \
            has_course and not distinct_room and (
                was_student and is_student or
                was_teacher and is_teacher
            )

    @staticmethod
    def should_run_redirect_to_teacher_view(
            has_course, was_none, was_student, was_teacher,
            is_none, is_student, is_teacher, distinct_room):
        return was_teacher and is_none

    def __init__(self, handler):
        """Initialize the per-connection session state flags."""
        super().__init__(handler)
        self.session_start_ok = False
        self.block_logout = False
        self.user_state_at_exclusive_login = None
        self.dont_leave_seat = False

    @coroutine
    def load_user(self, token):
        """Load and authenticate the user encoded in *token*.

        The token is first decoded without verification just to get the
        user id, then verified against that user's secret.

        :raises jwt.InvalidTokenError:
            If the token is invalid or no matching user exists.
        """
        try:
            uid = jwt.decode(token, verify=False)['id']
            user = yield User.get(uid)
            jwt.decode(token, user.secret)
            self.handler.user = user
        except NoObjectReturnedFromDB as norfdb:
            ite = jwt.InvalidTokenError(
                'No user was found in the database for '
                'this token.'
            )
            raise ite from norfdb

    @subscribe('teacherMessage', 'l')
    @coroutine
    def redirect_message_if_user_is_teacher(
            self, message, content=True):
        """Redirects a message if the user is a teacher.

        This coroutine redirects a message to the local
        channel only if the current user is a teacher.

        :param dict message:
            The message that should be redirected if the
            user is a teacher.

        :param bool content:
            If ``True``, just the object corresponding to
            the ``'content'`` key of ``message`` will be
            sent.
            If ``False``, the whole message will be sent.

        :raises MalformedMessageError:
            If ``content`` is ``True``, but ``message``
            doesn't have the ``'content'`` key.

        :raises NotDictError:
            If ``message`` is not a dictionary.

        :raises NoMessageTypeError:
            If the message or it's content doesn't have the
            ``'type'`` key.

        :raises NoActionForMsgTypeError:
            If ``send_function`` of the ``PubSub`` object
            wasn't specified during object creation and
            there's no registered action for this message
            type.

        :raises AttributeError:
            If the user is not yet loaded or if the user is
            ``None``.
        """
        try:
            # status 'room' identifies a teacher session.
            if self.handler.user.status == 'room':
                self.redirect_to('l', message, content)
        except:
            raise

    @subscribe('logout', 'l')
    def logout(self, message):
        """Handle a local 'logout' message, closing this connection.

        ``block_logout`` suppresses exactly one logout (set before an
        exclusive-login broadcast so this instance ignores its own
        message).
        """
        try:
            if self.block_logout:
                self.block_logout = False
            else:
                self.user_state_at_exclusive_login = \
                    message.get(
                        'user_state_at_exclusive_login')
                self.dont_leave_seat = message.get(
                    'dont_leave_seat', False)
                self.handler.logout_and_close(
                    message['reason'])
        except KeyError as ke:
            if 'reason' not in message:
                mme = MalformedMessageError(
                    "'reason' key not found in message.")
                raise mme from ke
            else:
                raise

    @subscribe('userMessage', channels={'w', 'l'})
    def send_user_message(self, message):
        """Send a message to all instances of a single user.

        The user id is appended to the message type and
        ``message`` is sent through the database.

        .. todo::
            *   Change the message type of the subscription
                to be organised by namespace.
            *   Change this method so that it uses
                self.handler.user_msg_type.
        """
        try:
            message['type'] = '{}({})'.format(
                message['type'], self.handler.user.id)
            self.redirect_to('d', message)
        except MalformedMessageError:
            self.handler.send_malformed_message_error(
                message)
            msg.malformed_message(_path, message)
        except AttributeError:
            if not hasattr(self.handler, 'user'):
                self.handler.send_user_not_loaded_error(
                    message)
            else:
                raise

    @subscribe(
        'user.message.frontend.send', channels={'w', 'l'})
    def send_frontend_user_message(self, message):
        """Send a message to all clients of a single user.

        .. todo::
            *   Review the error handling and documentation
                of this funcion.
        """
        try:
            # Wrap the payload so receiving instances forward it to
            # their frontend.
            self.pub_subs['d'].send_message(
                {
                    'type': self.handler.user_msg_type,
                    'content': {
                        'type': 'toFrontend',
                        'content': message['content']
                    }
                }
            )
        except:
            raise

    def sub_to_user_messages(self):
        """Route messages of the same user to the local PS.

        This method subscribes
        :meth:`backend_modules.router.RouterWSC.to_local` to
        the messages of type ``userMessage(uid)`` coming
        from the database pub-sub. Where ``uid`` is the
        current user id.

        After the execution of this method,
        ``self.handler.user_msg_type`` contains the message
        type to be used to send messages to all instances of
        the user.
        """
        self.handler.user_msg_type = \
            'userMessage({})'.format(self.handler.user.id)
        router_object = self.handler.ws_objects[
            router.RouterWSC]
        self.register_action_in(
            self.handler.user_msg_type,
            action=router_object.to_local,
            channels={'d'}
        )

    def send_session_start_error(self, message, causes):
        """Send a session error message to the client.

        :param dict message:
            The message that caused the error.

        :param str causes:
            A string explaining the posible causes of the
            error.
        """
        try:
            self.handler.send_error(
                'session.start.error',
                message,
                'La sesión no se ha podido iniciar. ' +
                causes
            )
        except TypeError as e:
            if not isinstance(message, dict):
                te = TypeError(
                    'message should be a dictionary.')
                raise te from e
            elif not isinstance(causes, str):
                te = TypeError(
                    'causes should be a string.')
                raise te from e
            else:
                raise

    @coroutine
    def load_room_code(self, room_code_str):
        """Load the room code and room from the db.

        Returns a ``(code_type, room_name, seat_id)`` tuple; the string
        ``'none'`` stands for "no room code".

        ..todo::
            *   Write error handling.
        """
        if room_code_str == 'none':
            room_code = None
            room = None
            code_type = 'none'
            room_name = None
            seat_id = None
        else:
            room_code = yield Code.get(room_code_str)
            room = yield room_code.room
            code_type = room_code.code_type.value
            room_name = room.name
            seat_id = room_code.seat_id
        self.handler.room_code = room_code
        self.handler.room = room
        return (code_type, room_name, seat_id)

    def redirect_to_teacher_view(self, room_code, message):
        """Redirect the client to the current teacher view.

        .. todo:
            *   Use send_session_start_error instead of
                handler.send_error.
        """
        if room_code == 'none':
            err_msg = "Can't redirect user to the " \
                "teacher view. This can be caused by an " \
                "inconsistency in the database."
            self.handler.send_error(
                'databaseInconsistency', message, err_msg)
            raise Exception(err_msg)
        self.pub_subs['w'].send_message(
            {
                'type': 'replaceLocation',
                'url': room_code
            }
        )

    @subscribe('session.start', 'w')
    @coroutine
    def startSession(self, message):
        """Start a user session

        .. todo::
            *   Add a variable that indicates the last stage
                that was executed successfully. So that the
                finally clause can clean the mess properly.
        """
        try:
            yield self.load_user(message['token'])
            self.sub_to_user_messages()
            code_type, room_name, seat_id = \
                yield self.load_room_code(
                    message['room_code'])
            user = self.handler.user
            room_code = self.handler.room_code
            room = self.handler.room
            course_id = user.course_id
            # Booleans describing the session transition; "was_*" comes
            # from the stored user state, "is_*" from the scanned code.
            has_course = course_id is not None
            was_none = (user.status == 'none')
            was_student = (user.status == 'seat')
            was_teacher = (user.status == 'room')
            is_none = (code_type == 'none')
            is_student = (code_type == 'seat')
            is_teacher = (code_type == 'room')
            distinct_room = not (
                room_name is None or
                user.room_name is None or
                room_name == user.room_name
            )
            same_seat = (seat_id == user.seat_id) and \
                not distinct_room
            transition_data = (
                has_course, was_none, was_student,
                was_teacher, is_none, is_student,
                is_teacher, distinct_room
            )
            # Redirect to Teacher View
            if self.should_run_redirect_to_teacher_view(
                    *transition_data):
                self.redirect_to_teacher_view(
                    user.room_code, message)
                # The rest of the code is not executed.
                return
            # Room Deassign Course
            '''This should always run before "User Deassign
            Course"'''
            if self.should_run_room_deassign_course(
                    *transition_data):
                if distinct_room:
                    r = yield Room.get(user.room_name)
                else:
                    r = room
                yield r.deassign_course(course_id)
            # User Deassign Course
            if self.should_run_user_deassign_course(
                    *transition_data):
                yield user.deassign_course()
            # Logout Other Instances
            if self.should_run_logout_other_instances(
                    *transition_data):
                # block_logout keeps this instance from reacting to its
                # own broadcast.
                self.block_logout = True
                self.pub_subs['d'].send_message(
                    {
                        'type': self.handler.user_msg_type,
                        'content': {
                            'type': 'logout',
                            'reason': 'exclusiveLogin',
                            'user_state_at_exclusive_login':
                                user._data,
                            'dont_leave_seat': same_seat,
                        }
                    }
                )
            # Use Seat
            if self.should_run_use_seat(*transition_data) \
                    and (not same_seat or
                         user.instances == 0):
                yield room.use_seat(room_code.seat_id)
            # Load Course
            if self.should_run_load_course(
                    *transition_data):
                self.handler.course = yield Course.get(
                    course_id)
            # Increase Instances
            yield self.handler.user.increase_instances()
            yield self.handler.user.store_dict(
                {
                    'status': code_type,
                    'room_code': message['room_code'],
                    'room_name': room_name,
                    'seat_id': seat_id,
                }
            )
        except jwt.InvalidTokenError:
            self.handler.logout_and_close('invalidToken')
        except ConditionNotMetError:
            self.send_session_start_error(
                message,
                'Es probable que este error se daba a que '
                'el asiento que desea registrar ya está '
                'usado.'
            )
        except OperationFailure:
            self.send_session_start_error(
                message,
                'Una operación de la base de datos ha '
                'fallado.'
            )
        except KeyError:
            keys_in_message = all(
                map(
                    lambda k: k in message,
                    ('token', 'room_code')
                )
            )
            if not keys_in_message:
                self.handler.send_malformed_message_error(
                    message)
            else:
                raise
        else:
            self.session_start_ok = True
            self.pub_subs['w'].send_message(
                {
                    'type': 'session.start.ok',
                    'code_type': code_type,
                    'course_id': user.course_id
                }
            )
        finally:
            if not self.session_start_ok:
                pass

    @subscribe('getUserName', 'w')
    def get_user_name(self, message):
        """Send the user's name to the client.

        .. todo::
            *   Re-raise attribute error
            *   review error handling.
        """
        try:
            name = self.handler.user.name
            self.pub_subs['w'].send_message(
                {'type': 'userName',
                 'name': name})
        except AttributeError:
            self.handler.send_user_not_loaded_error(message)

    @coroutine
    def end_room_usage(self, user, is_teacher, is_student):
        """Release room resources held by *user* at session end.

        Best-effort: when no room_code was ever loaded, a warning is
        logged instead of raising.
        """
        try:
            room_code = self.handler.room_code
            room = yield room_code.room
            # Room Deassign Course
            if is_teacher and \
                    user.course_id is not None and \
                    user.instances == 1:
                yield room.deassign_course(user.course_id)
            # Leave seat
            if is_student and not self.dont_leave_seat:
                yield room.leave_seat(room_code.seat_id)
        except AttributeError:
            if not hasattr(self.handler, 'room_code') or \
                    self.handler.room_code is None:
                msg.code_warning(
                    msg.join_path(
                        __name__, self.end.__qualname__),
                    "room_code wasn't initialized at "
                    "{.__class__.__name__}'s "
                    "end.".format(self)
                )
            else:
                raise

    @coroutine
    def end(self):
        """Tear down the session when the connection closes."""
        try:
            yield super().end()
            if not self.session_start_ok:
                return
            # Prefer the state snapshot taken at exclusive login, if any.
            if self.user_state_at_exclusive_login:
                user = User(
                    self.user_state_at_exclusive_login)
            else:
                user = self.handler.user
            yield user.sync('instances')
            is_teacher = (user.status == 'room')
            is_student = (user.status == 'seat')
            # Room Deassign Course
            # Leave seat
            yield self.end_room_usage(
                user, is_teacher, is_student)
            # User Deassign Course
            yield user.deassign_course(
                if_last_instance=is_teacher)
            # Decrease Instances
            # (can modify status and course_id)
            yield user.decrease_instances()
        except:
            raise
| agpl-3.0 |
qxsch/QXSConsolas | examples/CopyThat/copyThat/sqlalchemy/testing/runner.py | 55 | 1607 | #!/usr/bin/env python
# testing/runner.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Nose test runner module.
This script is a front-end to "nosetests" which
installs SQLAlchemy's testing plugin into the local environment.
The script is intended to be used by third-party dialects and extensions
that run within SQLAlchemy's testing framework. The runner can
be invoked via::
python -m sqlalchemy.testing.runner
The script is then essentially the same as the "nosetests" script, including
all of the usual Nose options. The test environment requires that a
setup.cfg is locally present including various required options.
Note that when using this runner, Nose's "coverage" plugin will not be
able to provide coverage for SQLAlchemy itself, since SQLAlchemy is
imported into sys.modules before coverage is started. The special
script sqla_nose.py is provided as a top-level script which loads the
plugin in a special (somewhat hacky) way so that coverage against
SQLAlchemy itself is possible.
"""
from .plugin.noseplugin import NoseSQLAlchemy
import nose
def main():
    """Run nose with SQLAlchemy's testing plugin installed."""
    plugin = NoseSQLAlchemy()
    nose.main(addplugins=[plugin])
def setup_py_test():
    """Runner to use for the 'test_suite' entry of your setup.py.

    Prevents any name clash shenanigans from the command line
    argument "test" that the "setup.py test" command sends
    to nose.
    """
    plugin = NoseSQLAlchemy()
    # A fixed argv keeps nose from consuming "setup.py test" arguments.
    nose.main(addplugins=[plugin], argv=['runner'])
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/test/test_md5.py | 194 | 1790 | # Testing md5 module
import warnings
warnings.filterwarnings("ignore", "the md5 module is deprecated.*",
DeprecationWarning)
import unittest
from md5 import md5
from test import test_support
def hexstr(s):
    """Return the lowercase hex encoding of string *s*.

    Each character contributes exactly two hex digits: its code point
    masked to one byte, matching the original nibble-table lookup.
    """
    # ''.join avoids the quadratic cost of repeated string concatenation.
    return ''.join('%02x' % (ord(c) & 0xFF) for c in s)
class MD5_Test(unittest.TestCase):
    """Known-answer tests for the deprecated ``md5`` module."""

    def md5test(self, s, expected):
        # Check both the binary digest (via hexstr) and hexdigest().
        self.assertEqual(hexstr(md5(s).digest()), expected)
        self.assertEqual(md5(s).hexdigest(), expected)

    def test_basics(self):
        # Standard MD5 known-answer vectors.
        eq = self.md5test
        eq('', 'd41d8cd98f00b204e9800998ecf8427e')
        eq('a', '0cc175b9c0f1b6a831c399e269772661')
        eq('abc', '900150983cd24fb0d6963f7d28e17f72')
        eq('message digest', 'f96b697d7cb7938d525a2f31aaf161d0')
        eq('abcdefghijklmnopqrstuvwxyz', 'c3fcd3d76192e4007dfb496cca67e13b')
        eq('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
           'd174ab98d277d9f5a5611c2c9f419d9f')
        eq('12345678901234567890123456789012345678901234567890123456789012345678901234567890',
           '57edf4a22be3c955ac49da2e2107b67a')

    def test_hexdigest(self):
        # hexdigest is new with Python 2.0
        m = md5('testing the hexdigest method')
        h = m.hexdigest()
        self.assertEqual(hexstr(m.digest()), h)

    def test_large_update(self):
        # Several partial updates must hash identically to one big update.
        aas = 'a' * 64
        bees = 'b' * 64
        cees = 'c' * 64
        m1 = md5()
        m1.update(aas)
        m1.update(bees)
        m1.update(cees)
        m2 = md5()
        m2.update(aas + bees + cees)
        self.assertEqual(m1.digest(), m2.digest())
def test_main():
    # Entry point used by Python's regression-test harness.
    test_support.run_unittest(MD5_Test)

if __name__ == '__main__':
    test_main()
| mit |
CyrilWaechter/pyRevitMEP | pyRevitMEP.tab/Lab.panel/Lab.pulldown/AirFlowSpaceToTerminal.pushbutton/script.py | 1 | 3085 | # coding: utf8
from Autodesk.Revit.DB import (
FilteredElementCollector,
BuiltInCategory,
Transaction,
Document,
BuiltInParameter,
Category,
Connector,
)
from Autodesk.Revit.DB.Mechanical import DuctFlowConfigurationType
import rpw
from pyrevit import script, forms
__title__ = "AirFlowSpaceToAirTerminal"
__author__ = "Cyril Waechter"
__doc__ = "Copy actual air flow (total terminals air flow) to Space design flow"
doc = __revit__.ActiveUIDocument.Document # type: Document
uidoc = __revit__.ActiveUIDocument # type: UIDocument
logger = script.get_logger()
def create_terminal_space_dict():
    """Group air terminals by space and system classification.

    Returns ``{space_id: {system_classification: [terminals]}}``.
    Falls back to all duct terminals of the document when nothing is
    selected.  Only terminals whose duct flow configuration is *Preset*
    are kept; terminals outside any space are logged and skipped.
    """
    selection = get_terminals_in_selection()
    if not selection:
        # Nothing selected: consider every air terminal in the document.
        selection = FilteredElementCollector(doc).OfCategory(
            BuiltInCategory.OST_DuctTerminal
        )
    d = dict()
    for air_terminal in selection:
        # First connector of the terminal.
        connector = [c for c in air_terminal.MEPModel.ConnectorManager.Connectors][0]  # type: Connector
        if connector.AssignedDuctFlowConfiguration != DuctFlowConfigurationType.Preset:
            continue
        space = doc.GetSpaceAtPoint(air_terminal.Location.Point)
        system_type = air_terminal.get_Parameter(
            BuiltInParameter.RBS_SYSTEM_CLASSIFICATION_PARAM
        ).AsString()
        if space:
            d.setdefault(space.Id, dict()).setdefault(system_type, []).append(
                air_terminal
            )
        else:
            logger.info("{} is not in any space".format(air_terminal.Id))
    logger.debug(d)
    return d
def get_terminals_in_selection():
    """Return the air terminals among the currently selected elements."""
    duct_terminal_id = Category.GetCategory(
        doc, BuiltInCategory.OST_DuctTerminal
    ).Id
    selected_elements = (
        doc.GetElement(elem_id)
        for elem_id in uidoc.Selection.GetElementIds()
    )
    return [elem for elem in selected_elements
            if elem.Category.Id == duct_terminal_id]
def space_to_air_terminals():
    """Write each space's design air flows to its air terminals.

    The space's design supply flow is distributed over its 'Soufflage'
    (supply) terminals and the design return flow over its 'Reprise'
    (return) terminals, inside a single transaction.
    """
    with rpw.db.Transaction(doc=doc, name="Store Spaces flow to AirTerminals"):
        for space_id, system_types in create_terminal_space_dict().items():
            space = doc.GetElement(space_id)
            # Design supply / return air flow of the space.
            fou = space.get_Parameter(
                BuiltInParameter.ROOM_DESIGN_SUPPLY_AIRFLOW_PARAM
            ).AsDouble()
            rep = space.get_Parameter(
                BuiltInParameter.ROOM_DESIGN_RETURN_AIRFLOW_PARAM
            ).AsDouble()
            try:
                fou_terminals = system_types["Soufflage"]
                set_terminals_flow(fou_terminals, fou)
            except KeyError:
                # No supply terminals in this space.
                pass
            try:
                rep_terminals = system_types["Reprise"]
                set_terminals_flow(rep_terminals, rep)
            except KeyError:
                # No return terminals in this space.
                pass
def set_terminals_flow(air_terminals, total_flow):
    """Distribute *total_flow* evenly across *air_terminals*.

    Each terminal's flow parameter is set to
    ``total_flow / len(air_terminals)``.  Does nothing for an empty
    list (avoids a ZeroDivisionError).

    BUGFIX: the original accidentally nested two identical loops over
    the same list, writing every parameter len(air_terminals) times;
    a single pass is sufficient and sets the same final values.
    """
    quantity = len(air_terminals)
    if not quantity:
        return
    flow_per_terminal = total_flow / quantity
    for air_terminal in air_terminals:
        air_terminal.get_Parameter(BuiltInParameter.RBS_DUCT_FLOW_PARAM).Set(
            flow_per_terminal
        )
# Allow the script to be executed directly by pyRevit.
if __name__ == "__main__":
    space_to_air_terminals()
Orochimarufan/youtube-dl | youtube_dl/extractor/reuters.py | 64 | 2438 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
js_to_json,
int_or_none,
unescapeHTML,
)
class ReutersIE(InfoExtractor):
    """Extractor for Reuters video pages (numeric ``videoId`` URLs)."""
    _VALID_URL = r'https?://(?:www\.)?reuters\.com/.*?\?.*?videoId=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.reuters.com/video/2016/05/20/san-francisco-police-chief-resigns?videoId=368575562',
        'md5': '8015113643a0b12838f160b0b81cc2ee',
        'info_dict': {
            'id': '368575562',
            'ext': 'mp4',
            'title': 'San Francisco police chief resigns',
        }
    }

    def _real_extract(self, url):
        """Extract title, thumbnail, duration and formats for one video."""
        video_id = self._match_id(url)
        # The embeddable player page carries the video configuration.
        webpage = self._download_webpage(
            'http://www.reuters.com/assets/iframe/yovideo?videoId=%s' % video_id, video_id)
        video_data = js_to_json(self._search_regex(
            r'(?s)Reuters\.yovideo\.drawPlayer\(({.*?})\);',
            webpage, 'video data'))

        def get_json_value(key, fatal=False):
            # Pull a single quoted string value out of the player config.
            return self._search_regex(r'"%s"\s*:\s*"([^"]+)"' % key, video_data, key, fatal=fatal)

        title = unescapeHTML(get_json_value('title', fatal=True))
        # Media id and format id are embedded in the 'flv' URL.
        mmid, fid = re.search(r',/(\d+)\?f=(\d+)', get_json_value('flv', fatal=True)).groups()

        # The MAS service lists the available renditions for this clip.
        mas_data = self._download_json(
            'http://mas-e.cds1.yospace.com/mas/%s/%s?trans=json' % (mmid, fid),
            video_id, transform_source=js_to_json)
        formats = []
        for f in mas_data:
            f_url = f.get('url')
            if not f_url:
                continue
            method = f.get('method')
            if method == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
            else:
                container = f.get('container')
                ext = '3gp' if method == 'mobile' else container
                formats.append({
                    'format_id': ext,
                    'url': f_url,
                    'ext': ext,
                    'container': container if method != 'mobile' else None,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': get_json_value('thumb'),
            'duration': int_or_none(get_json_value('seconds')),
            'formats': formats,
        }
| unlicense |
Hakuba/youtube-dl | youtube_dl/extractor/ndtv.py | 129 | 2551 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
month_by_name,
int_or_none,
)
class NDTVIE(InfoExtractor):
    """Extractor for ndtv.com video player pages."""
    _VALID_URL = r'^https?://(?:www\.)?ndtv\.com/video/player/[^/]*/[^/]*/(?P<id>[a-z0-9]+)'
    _TEST = {
        'url': 'http://www.ndtv.com/video/player/news/ndtv-exclusive-don-t-need-character-certificate-from-rahul-gandhi-says-arvind-kejriwal/300710',
        'md5': '39f992dbe5fb531c395d8bbedb1e5e88',
        'info_dict': {
            'id': '300710',
            'ext': 'mp4',
            'title': "NDTV exclusive: Don't need character certificate from Rahul Gandhi, says Arvind Kejriwal",
            'description': 'md5:ab2d4b4a6056c5cb4caa6d729deabf02',
            'upload_date': '20131208',
            'duration': 1327,
            'thumbnail': 'http://i.ndtvimg.com/video/images/vod/medium/2013-12/big_300710_1386518307.jpg',
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds the media filename in an inline JS variable.
        filename = self._search_regex(
            r"__filename='([^']+)'", webpage, 'video filename')
        video_url = ('http://bitcast-b.bitgravity.com/ndtvod/23372/ndtv/%s' %
                     filename)
        duration = int_or_none(self._search_regex(
            r"__duration='([^']+)'", webpage, 'duration', fatal=False))
        # Upload date is only available as human-readable text in the page.
        date_m = re.search(r'''(?x)
            <p\s+class="vod_dateline">\s*
                Published\s+On:\s*
                (?P<monthname>[A-Za-z]+)\s+(?P<day>[0-9]+),\s*(?P<year>[0-9]+)
            ''', webpage)
        upload_date = None
        if date_m is not None:
            month = month_by_name(date_m.group('monthname'))
            if month is not None:
                upload_date = '%s%02d%02d' % (
                    date_m.group('year'), month, int(date_m.group('day')))
        description = self._og_search_description(webpage)
        # Strip boilerplate suffixes added by the site.
        READ_MORE = ' (Read more)'
        if description.endswith(READ_MORE):
            description = description[:-len(READ_MORE)]
        title = self._og_search_title(webpage)
        TITLE_SUFFIX = ' - NDTV'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)]
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'upload_date': upload_date,
        }
| unlicense |
FenceAtMHacks/flaskbackend | fence-api/flask/lib/python2.7/site-packages/setuptools/tests/test_develop.py | 41 | 3496 | """develop tests
"""
import os
import shutil
import site
import sys
import tempfile
import unittest
from distutils.errors import DistutilsError
from setuptools.command.develop import develop
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo',
packages=['foo'],
use_2to3=True,
)
"""
INIT_PY = """print "foo"
"""
class TestDevelopTest(unittest.TestCase):
    """Exercise the ``develop`` command against a throwaway project.

    setUp creates a temp project directory plus fake user-site directories,
    and tearDown restores them; both bail out early on Python < 2.6 or
    inside a virtualenv (same guard as the tests themselves).
    """
    def setUp(self):
        if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
            return
        # Directory structure
        self.dir = tempfile.mkdtemp()
        os.mkdir(os.path.join(self.dir, 'foo'))
        # setup.py
        setup = os.path.join(self.dir, 'setup.py')
        f = open(setup, 'w')
        f.write(SETUP_PY)
        f.close()
        self.old_cwd = os.getcwd()
        # foo/__init__.py
        init = os.path.join(self.dir, 'foo', '__init__.py')
        f = open(init, 'w')
        f.write(INIT_PY)
        f.close()
        os.chdir(self.dir)
        # Redirect the per-user site directories to temp dirs so the test
        # never touches the real user site-packages.
        self.old_base = site.USER_BASE
        site.USER_BASE = tempfile.mkdtemp()
        self.old_site = site.USER_SITE
        site.USER_SITE = tempfile.mkdtemp()
    def tearDown(self):
        if sys.version < "2.6" or hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix):
            return
        os.chdir(self.old_cwd)
        shutil.rmtree(self.dir)
        shutil.rmtree(site.USER_BASE)
        shutil.rmtree(site.USER_SITE)
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
    def test_develop(self):
        """Run ``develop --user`` and verify the egg-link and 2to3 output."""
        if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
            return
        dist = Distribution(
            dict(name='foo',
                 packages=['foo'],
                 use_2to3=True,
                 version='0.0',
                 ))
        dist.script_name = 'setup.py'
        cmd = develop(dist)
        cmd.user = 1
        cmd.ensure_finalized()
        cmd.install_dir = site.USER_SITE
        cmd.user = 1
        old_stdout = sys.stdout
        #sys.stdout = StringIO()
        try:
            cmd.run()
        finally:
            sys.stdout = old_stdout
        # let's see if we got our egg link at the right place
        content = os.listdir(site.USER_SITE)
        content.sort()
        self.assertEqual(content, ['easy-install.pth', 'foo.egg-link'])
        # Check that we are using the right code.
        egg_link_file = open(os.path.join(site.USER_SITE, 'foo.egg-link'), 'rt')
        try:
            path = egg_link_file.read().split()[0].strip()
        finally:
            egg_link_file.close()
        init_file = open(os.path.join(path, 'foo', '__init__.py'), 'rt')
        try:
            init = init_file.read().strip()
        finally:
            init_file.close()
        # On Python 3 the source must have been run through 2to3.
        if sys.version < "3":
            self.assertEqual(init, 'print "foo"')
        else:
            self.assertEqual(init, 'print("foo")')
    def notest_develop_with_setup_requires(self):
        # NOTE: 'notest_' prefix means this test is deliberately disabled.
        wanted = ("Could not find suitable distribution for "
                  "Requirement.parse('I-DONT-EXIST')")
        old_dir = os.getcwd()
        os.chdir(self.dir)
        try:
            try:
                Distribution({'setup_requires': ['I_DONT_EXIST']})
            except DistutilsError:
                e = sys.exc_info()[1]
                error = str(e)
                if error == wanted:
                    pass
        finally:
            os.chdir(old_dir)
| mit |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py | 25 | 3269 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
import tensorflow as tf
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
  """Graph-construction smoke tests for the ForestToDataThenNN hybrid model."""
  def setUp(self):
    # Small hyperparameter set; values are arbitrary but internally
    # consistent (num_nodes/num_leaves derive from hybrid_tree_depth).
    self.params = tensor_forest.ForestHParams(
        num_classes=2,
        num_features=31,
        layer_size=11,
        num_layers=13,
        num_trees=3,
        connection_probability=0.1,
        hybrid_tree_depth=4,
        regularization_strength=0.01,
        regularization="",
        base_random_seed=10,
        feature_bagging_fraction=1.0,
        learning_rate=0.01,
        weight_init_mean=0.0,
        weight_init_std=0.1)
    self.params.regression = False
    self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
    self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
    self.params.num_features_per_node = (
        self.params.feature_bagging_fraction * self.params.num_features)
  def testInferenceConstruction(self):
    """Inference graph builds and yields a Tensor."""
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])
    with variable_scope.variable_scope(
        "ForestToDataThenNNTest_testInferenceContruction"):
      graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
      graph = graph_builder.inference_graph(data, None)
      self.assertTrue(isinstance(graph, Tensor))
  def testTrainingConstruction(self):
    """Training graph builds and yields an Operation."""
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])
    labels = [1 for _ in range(100)]
    with variable_scope.variable_scope(
        "ForestToDataThenNNTest.testTrainingContruction"):
      graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
      graph = graph_builder.training_graph(data, labels, None)
      self.assertTrue(isinstance(graph, Operation))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
anand-c-goog/tensorflow | tensorflow/python/training/slot_creator.py | 27 | 4034 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
def _create_slot_var(primary, val, scope):
  """Helper function for creating a slot variable.

  Creates a non-trainable Variable named after *scope* and, when *primary*
  is a partitioned variable, copies its save-slice metadata onto the slot so
  checkpoints partition the slot identically.
  """
  slot = variables.Variable(val, name=scope, trainable=False)
  # pylint: disable=protected-access
  if isinstance(primary, variables.Variable) and primary._save_slice_info:
    # Primary is a partitioned variable, so we need to also indicate that
    # the slot is a partitioned variable. Slots have the same partitioning
    # as their primaries.
    # Strip the "<primary op name>/" prefix and the trailing character of
    # the scope string to recover the bare slot name (scope presumably ends
    # with "/" — confirm against ops.name_scope behavior).
    real_slot_name = scope[len(primary.op.name + "/"):-1]
    slice_info = primary._save_slice_info
    slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
        slice_info.full_name + "/" + real_slot_name,
        slice_info.full_shape[:],
        slice_info.var_offset[:],
        slice_info.var_shape[:]))
  # pylint: enable=protected-access
  return slot
def create_slot(primary, val, name, colocate_with_primary=True):
  """Create a slot initialized to the given value.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    val: A `Tensor` specifying the initial value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean. If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # The slot lives in the name scope of its primary variable.
  with ops.name_scope(primary.op.name + "/" + name) as scope:
    if not colocate_with_primary:
      return _create_slot_var(primary, val, scope)
    # Pin the slot onto the primary's device.
    with ops.colocate_with(primary):
      return _create_slot_var(primary, val, scope)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
  """Create a slot initialized to 0 with same shape as the primary object.

  Args:
    primary: The primary `Variable` or `Tensor`.
    name: Name to use for the slot variable.
    dtype: Type of the slot variable. Defaults to the type of `primary`.
    colocate_with_primary: Boolean. If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # Fall back to the primary's dtype unless the caller overrode it.
  slot_dtype = dtype if dtype is not None else primary.dtype
  zeros = array_ops.zeros(primary.get_shape().as_list(), dtype=slot_dtype)
  return create_slot(primary, zeros, name,
                     colocate_with_primary=colocate_with_primary)
| apache-2.0 |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/logilab/common/test/unittest_configuration.py | 7 | 13014 | # copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
import tempfile
import os
from os.path import join, dirname, abspath
import re
from sys import version_info
from logilab.common.compat import StringIO
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.optik_ext import OptionValueError
from logilab.common.configuration import Configuration, OptionError, \
OptionsManagerMixIn, OptionsProviderMixIn, Method, read_old_config, \
merge_options
# Directory holding fixture files (e.g. test.ini) used by the tests.
DATA = join(dirname(abspath(__file__)), 'data')
# Option declarations exercising every supported option type: yn, string,
# csv, int, choice, multiple_choice, named (Method-backed), grouped option,
# and an option aliased onto another via 'dest'.
OPTIONS = [('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
           ('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}),
           ('multiple', {'type': 'csv', 'default': ['yop', 'yep'],
                         'metavar': '<comma separated values>',
                         'help': 'you can also document the option'}),
           ('number', {'type': 'int', 'default':2, 'metavar':'<int>', 'help': 'boom'}),
           ('choice', {'type': 'choice', 'default':'yo', 'choices': ('yo', 'ye'),
                       'metavar':'<yo|ye>'}),
           ('multiple-choice', {'type': 'multiple_choice', 'default':['yo', 'ye'],
                                'choices': ('yo', 'ye', 'yu', 'yi', 'ya'),
                                'metavar':'<yo|ye>'}),
           ('named', {'type':'named', 'default':Method('get_named'),
                      'metavar': '<key=val>'}),
           ('diffgroup', {'type':'string', 'default':'pouet', 'metavar': '<key=val>',
                          'group': 'agroup'}),
           ('reset-value', {'type': 'string', 'metavar': '<string>', 'short': 'r',
                            'dest':'value'}),
           ]
class MyConfiguration(Configuration):
    """Configuration subclass used as the fixture for the tests below."""
    def get_named(self):
        # Value resolved for the Method-backed 'named' option default.
        return dict(key='val')
class ConfigurationTC(TestCase):
    """End-to-end tests of Configuration: defaults, option parsing from the
    command line and from ini files, config generation, and help output."""
    def setUp(self):
        self.cfg = MyConfiguration(name='test', options=OPTIONS, usage='Just do it ! (tm)')
    def test_default(self):
        # Every option should expose its declared default, including the
        # Method-backed 'named' option.
        cfg = self.cfg
        self.assertEqual(cfg['dothis'], True)
        self.assertEqual(cfg['value'], None)
        self.assertEqual(cfg['multiple'], ['yop', 'yep'])
        self.assertEqual(cfg['number'], 2)
        self.assertEqual(cfg['choice'], 'yo')
        self.assertEqual(cfg['multiple-choice'], ['yo', 'ye'])
        self.assertEqual(cfg['named'], {'key': 'val'})
    def test_base(self):
        # set_option coerces strings and rejects invalid values/choices.
        cfg = self.cfg
        cfg.set_option('number', '0')
        self.assertEqual(cfg['number'], 0)
        self.assertRaises(OptionValueError, cfg.set_option, 'number', 'youpi')
        self.assertRaises(OptionValueError, cfg.set_option, 'choice', 'youpi')
        self.assertRaises(OptionValueError, cfg.set_option, 'multiple-choice', ('yo', 'y', 'ya'))
        cfg.set_option('multiple-choice', 'yo, ya')
        self.assertEqual(cfg['multiple-choice'], ['yo', 'ya'])
        self.assertEqual(cfg.get('multiple-choice'), ['yo', 'ya'])
        self.assertEqual(cfg.get('whatever'), None)
    def test_load_command_line_configuration(self):
        # Leftover positional arguments are returned; options are applied.
        cfg = self.cfg
        args = cfg.load_command_line_configuration(['--choice', 'ye', '--number', '4',
                                                    '--multiple=1,2,3', '--dothis=n',
                                                    'other', 'arguments'])
        self.assertEqual(args, ['other', 'arguments'])
        self.assertEqual(cfg['dothis'], False)
        self.assertEqual(cfg['multiple'], ['1', '2', '3'])
        self.assertEqual(cfg['number'], 4)
        self.assertEqual(cfg['choice'], 'ye')
        self.assertEqual(cfg['value'], None)
        # A second parse only overrides what it mentions.
        args = cfg.load_command_line_configuration(['-v', 'duh'])
        self.assertEqual(args, [])
        self.assertEqual(cfg['value'], 'duh')
        self.assertEqual(cfg['dothis'], False)
        self.assertEqual(cfg['multiple'], ['1', '2', '3'])
        self.assertEqual(cfg['number'], 4)
        self.assertEqual(cfg['choice'], 'ye')
    def test_load_configuration(self):
        cfg = self.cfg
        args = cfg.load_configuration(choice='ye', number='4',
                                      multiple='1,2,3', dothis='n',
                                      multiple_choice=('yo', 'ya'))
        self.assertEqual(cfg['dothis'], False)
        self.assertEqual(cfg['multiple'], ['1', '2', '3'])
        self.assertEqual(cfg['number'], 4)
        self.assertEqual(cfg['choice'], 'ye')
        self.assertEqual(cfg['value'], None)
        self.assertEqual(cfg['multiple-choice'], ('yo', 'ya'))
    def test_load_configuration_file_case_insensitive(self):
        # Section names are matched case-insensitively ([Test] vs TEST).
        file = tempfile.mktemp()
        stream = open(file, 'w')
        try:
            stream.write("""[Test]
dothis=no
#value=
# you can also document the option
multiple=yop,yepii
# boom
number=3
choice=yo
multiple-choice=yo,ye
named=key:val
[agroup]
diffgroup=zou
""")
            stream.close()
            self.cfg.load_file_configuration(file)
            self.assertEqual(self.cfg['dothis'], False)
            self.assertEqual(self.cfg['value'], None)
            self.assertEqual(self.cfg['multiple'], ['yop', 'yepii'])
            self.assertEqual(self.cfg['diffgroup'], 'zou')
        finally:
            os.remove(file)
    def test_option_order(self):
        """ Check that options are taken into account in the command line order
        and not in the order they are defined in the Configuration object.
        """
        file = tempfile.mktemp()
        stream = open(file, 'w')
        try:
            stream.write("""[Test]
reset-value=toto
value=tata
""")
            stream.close()
            self.cfg.load_file_configuration(file)
        finally:
            os.remove(file)
        self.assertEqual(self.cfg['value'], 'tata')
    def test_unsupported_options(self):
        # Unknown options in the file are ignored on load but raise on access.
        file = tempfile.mktemp()
        stream = open(file, 'w')
        try:
            stream.write("""[Test]
whatever=toto
value=tata
""")
            stream.close()
            self.cfg.load_file_configuration(file)
        finally:
            os.remove(file)
        self.assertEqual(self.cfg['value'], 'tata')
        self.assertRaises(OptionError, self.cfg.__getitem__, 'whatever')
    def test_generate_config(self):
        stream = StringIO()
        self.cfg.generate_config(stream)
        self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
#value=
# you can also document the option
multiple=yop,yep
# boom
number=2
choice=yo
multiple-choice=yo,ye
named=key:val
#reset-value=
[AGROUP]
diffgroup=pouet""")
    def test_generate_config_with_space_string(self):
        # Whitespace-only values must be quoted so they round-trip.
        self.cfg['value'] = ' '
        stream = StringIO()
        self.cfg.generate_config(stream)
        self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
value=' '
# you can also document the option
multiple=yop,yep
# boom
number=2
choice=yo
multiple-choice=yo,ye
named=key:val
reset-value=' '
[AGROUP]
diffgroup=pouet""")
    def test_roundtrip(self):
        # generate_config output must load back to equivalent values.
        cfg = self.cfg
        f = tempfile.mktemp()
        stream = open(f, 'w')
        try:
            self.cfg['dothis'] = False
            self.cfg['multiple'] = ["toto", "tata"]
            self.cfg['number'] = 3
            cfg.generate_config(stream)
            stream.close()
            new_cfg = MyConfiguration(name='test', options=OPTIONS)
            new_cfg.load_file_configuration(f)
            self.assertEqual(cfg['dothis'], new_cfg['dothis'])
            self.assertEqual(cfg['multiple'], new_cfg['multiple'])
            self.assertEqual(cfg['number'], new_cfg['number'])
            self.assertEqual(cfg['choice'], new_cfg['choice'])
            self.assertEqual(cfg['value'], new_cfg['value'])
            self.assertEqual(cfg['multiple-choice'], new_cfg['multiple-choice'])
        finally:
            os.remove(f)
    def test_setitem(self):
        self.assertRaises(OptionValueError,
                          self.cfg.__setitem__, 'multiple-choice', ('a', 'b'))
        self.cfg['multiple-choice'] = ('yi', 'ya')
        self.assertEqual(self.cfg['multiple-choice'], ('yi', 'ya'))
    def test_help(self):
        self.cfg.add_help_section('bonus', 'a nice additional help')
        help = self.cfg.help().strip()
        # at least in python 2.4.2 the output is:
        # ' -v <string>, --value=<string>'
        # it is not unlikely some optik/optparse versions do print -v<string>
        # so accept both
        help = help.replace(' -v <string>, ', ' -v<string>, ')
        help = re.sub('[ ]*(\r?\n)', '\\1', help)
        USAGE = """Usage: Just do it ! (tm)
Options:
-h, --help show this help message and exit
--dothis=<y or n>
-v<string>, --value=<string>
--multiple=<comma separated values>
you can also document the option [current: yop,yep]
--number=<int> boom [current: 2]
--choice=<yo|ye>
--multiple-choice=<yo|ye>
--named=<key=val>
-r <string>, --reset-value=<string>
Agroup:
--diffgroup=<key=val>
Bonus:
a nice additional help"""
        if version_info < (2, 5):
            # 'usage' header is not capitalized in this version
            USAGE = USAGE.replace('Usage: ', 'usage: ')
        elif version_info < (2, 4):
            USAGE = """usage: Just do it ! (tm)
options:
-h, --help show this help message and exit
--dothis=<y or n>
-v<string>, --value=<string>
--multiple=<comma separated values>
you can also document the option
--number=<int>
--choice=<yo|ye>
--multiple-choice=<yo|ye>
--named=<key=val>
Bonus:
a nice additional help
"""
        self.assertMultiLineEqual(help, USAGE)
    def test_manpage(self):
        # Only checks manpage generation does not raise.
        from logilab.common import __pkginfo__
        self.cfg.generate_manpage(__pkginfo__, stream=StringIO())
    def test_rewrite_config(self):
        # read_old_config applies rename/move migrations from an old ini file.
        changes = [('renamed', 'renamed', 'choice'),
                   ('moved', 'named', 'old', 'test'),
                   ]
        read_old_config(self.cfg, changes, join(DATA, 'test.ini'))
        stream = StringIO()
        self.cfg.generate_config(stream)
        self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST]
dothis=yes
value=' '
# you can also document the option
multiple=yop
# boom
number=2
choice=yo
multiple-choice=yo,ye
named=key:val
reset-value=' '
[AGROUP]
diffgroup=pouet""")
class Linter(OptionsManagerMixIn, OptionsProviderMixIn):
    """Minimal options manager that is also its own provider (regression fixture)."""
    options = (
        ('profile', {'type' : 'yn', 'metavar' : '<y_or_n>',
                     'default': False,
                     'help' : 'Profiled execution.'}),
        )
    def __init__(self):
        OptionsManagerMixIn.__init__(self, usage="")
        OptionsProviderMixIn.__init__(self)
        # Register self so its 'options' tuple is managed, then apply defaults.
        self.register_options_provider(self)
        self.load_provider_defaults()
class RegrTC(TestCase):
    """Regression: defaults survive an empty command-line parse."""
    def setUp(self):
        self.linter = Linter()
    def test_load_defaults(self):
        self.linter.load_command_line_configuration([])
        self.assertEqual(self.linter.config.profile, False)
class MergeTC(TestCase):
    """merge_options deduplicates by name, keeping the first definition."""
    def test_merge1(self):
        merged = merge_options([('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
                                ('dothis', {'type':'yn', 'action': 'store', 'default': False, 'metavar': '<y or n>'}),
                                ])
        self.assertEqual(len(merged), 1)
        self.assertEqual(merged[0][0], 'dothis')
        self.assertEqual(merged[0][1]['default'], True)
    def test_merge2(self):
        # Duplicated option moves to the end, but its first definition wins.
        merged = merge_options([('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}),
                                ('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}),
                                ('dothis', {'type':'yn', 'action': 'store', 'default': False, 'metavar': '<y or n>'}),
                                ])
        self.assertEqual(len(merged), 2)
        self.assertEqual(merged[0][0], 'value')
        self.assertEqual(merged[1][0], 'dothis')
        self.assertEqual(merged[1][1]['default'], True)
# Run the whole module's tests with logilab's unittest runner.
if __name__ == '__main__':
    unittest_main()
| mit |
Jgarcia-IAS/SITE | addons/lunch/wizard/__init__.py | 440 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lunch_validation
import lunch_cancel
import lunch_order
| agpl-3.0 |
martinb3/helpbot | plugins/help.py | 1 | 1095 | from lib import helps
from lib import utils
# Outgoing message queue; presumably drained by the slackbot plugin
# framework — each entry is [channel, text, attrs] (confirm against runner).
outputs = []
def process_message(data):
    """Handle a Slack message event; react to messages starting with '!help'.

    Records the help request in Redis, notifies the admin channel (only if
    the request was newly stored), and always acknowledges the requester.
    NOTE(review): relies on module-level ``config`` and ``slack_client``
    injected by the bot framework — confirm.
    """
    if data.get('text', '').split(' ')[0] == '!help':
        admin_channel, botname, icon_emoji = utils.setup_bot(config)
        message_attrs = {'icon_emoji': icon_emoji, 'username': botname}
        # Translate channel id to channel name
        channel = data.get('channel')
        channel_name = utils.get_channel_name(channel, slack_client)
        # Translate user id to username
        user = data.get('user')
        username = utils.get_user_name(user, slack_client)
        # Store help request in Redis
        halps = helps.Helps(config['redis'])
        stored = halps.store(channel_name, username, data.get('ts'))
        if stored:
            text = "%s needs help in %s!" % (username, channel_name)
            outputs.append([admin_channel, text, message_attrs])
        # Acknowledge in the originating channel regardless of dedup result.
        text = ("Thanks %s, your help request has been received."
                " A DevOps Support Racker will be with you shortly." %
                username)
        outputs.append([channel, text, message_attrs])
| mit |
santoshphilip/eppy | eppy/useful_scripts/eppyreadtest_file.py | 1 | 2848 | # Copyright (c) 2012 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""
script to test idf reads in eppy.
Use to read test a single idf file
This may may be one of the files that failed when running eppyreadtest_folder.py
-
eppyreadtest_file.py will output two files:
1. simpleread.idf
2. eppyread.idf
The idf objects in both are sorted in ascending order. The files should be
identical. If there is a mismatch, eppy made a mistake in the read. Comparing
the two files will show you where the error occurred.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = "../../"
sys.path.append(pathnameto_eppy)
import os
from eppy.modeleditor import IDF
import eppy.simpleread as simpleread
# iddfile = '/Applications/EnergyPlus-8-1-0/Energy+.idd'
# folder = '/Applications/EnergyPlus-8-1-0/ExampleFiles'
# python eppyreadtest_file.py '/Applications/EnergyPlus-8-1-0/Energy+.idd'
# '/Applications/EnergyPlus-8-1-0/ExampleFiles/RetailPackagedTESCoil.idf'
# for non-failure -> SeriesActiveBranch.idf
# python eppyreadtest_file.py '/Applications/EnergyPlus-8-1-0/Energy+.idd'
# '/Applications/EnergyPlus-8-1-0/ExampleFiles/SeriesActiveBranch.idf'
def doreadtest(iddfile, folder, silent=False):
    """Read-test a single idf file and print the result.

    Runs ``simpleread.idfreadtest`` with two independent handles on the idf
    file (one for the simple parser, one for eppy) and reports a mismatch
    location when the read fails.

    Args:
        iddfile: path to the Energy+.idd file.
        folder: path to the idf file to test (parameter name kept for
            backward compatibility with existing callers).
        silent: when True, suppress all output on failure.
    """
    # Bug fix: use the parameter instead of the module-level ``thefile``
    # global, which only exists when this module is run as a script.
    fname1 = folder
    verbose = not silent
    # Context managers guarantee the handles are closed even if the
    # parser raises (the original leaked them on exception).
    with open(iddfile, "r") as iddhandle, \
            open(fname1, "rb") as idfhandle1, \
            open(fname1, "rb") as idfhandle2:
        result = simpleread.idfreadtest(
            iddhandle, idfhandle1, idfhandle2, verbose=verbose, save=False
        )
    if result:
        print("file passed test")
    elif not silent:
        # Bug fix: the original printed "file passed test" on a silent
        # failure; now a silent failure prints nothing.
        print("first mismatch at the above line numbers")
        print("full filepath of file that failed the read test ->")
        print("   %s" % (fname1,))
        print()
        print("compare files 'simpleread.idf' vs 'eppyread.idf'")
        print()
# Command-line entry point: parse the idd/idf paths and run the read test.
if __name__ == "__main__":
    # do the argparse stuff
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument(
        "idd", action="store", help="location of idd file. ./somewhere/eplusv8-0-1.idd"
    )
    parser.add_argument(
        "thefile",
        action="store",
        help="location the idf file. ./somewhere/examples/thisfile.idf",
    )
    nspace = parser.parse_args()
    thefile = nspace.thefile
    iddfile = nspace.idd
    doreadtest(iddfile, thefile)
| mit |
Pfeter/SpaceShooter | background.py | 1 | 1843 | import sys, random
import pygame
from pygame.locals import *
class Screen(object):
    """Pygame starfield demo window.

    NOTE: ``__init__`` enters the blocking main loop (``self.main()``), so
    constructing a Screen runs the program until the window is closed.
    """
    def __init__(self, width=640, height=400, fps=60, stars=200):
        self.running = True
        self.fps = fps
        self.playtime = 0.0            # elapsed time in seconds
        self.total_stars = stars
        pygame.init()
        pygame.display.set_caption("Press ESC to quit")
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((self.width, self.height), pygame.DOUBLEBUF)
        self.background = pygame.Surface(self.screen.get_size()).convert()
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont('mono', 20, bold=True)
        self.stars = self.generate_stars()
        self.main()
    def generate_stars(self):
        """Return ``total_stars`` random [x, y] positions inside the window."""
        return [[random.randint(0, self.width), random.randint(0, self.height)] for i in range(self.total_stars)]
    def draw_stars(self):
        """Draw each star as a single pixel and scroll it one step left,
        wrapping to the right edge at a new random height."""
        for star in self.stars:
            pygame.draw.line(self.background,
                (255, 255, 255), (star[0], star[1]), (star[0], star[1]))
            star[0] = star[0] - 1
            if star[0] < 0:
                star[0] = self.width
                star[1] = random.randint(0, self.height)
    def exit(self):
        # Exit code 2 signals user-initiated quit.
        pygame.quit()
        sys.exit(2)
    def fps_and_playtime_caption(self):
        """Show current FPS and elapsed playtime in the window title."""
        text = "FPS: {0:.2f} Playtime: {1:.2f}".format(self.clock.get_fps(), self.playtime)
        pygame.display.set_caption(text)
    def main(self):
        """Blocking render loop; quits on window close or ESC."""
        while self.running:
            milliseconds = self.clock.tick(self.fps)
            self.playtime += milliseconds / 1000.0
            for event in pygame.event.get():
                if event.type == QUIT:
                    self.running = False
                    self.exit()
                elif event.type == KEYDOWN and event.key == K_ESCAPE:
                    self.running = False
                    self.exit()
            self.fps_and_playtime_caption()
            self.background.fill((0, 0, 0))
            self.draw_stars()
            self.screen.blit(self.background, (0, 0))
            pygame.display.update()
# Demo entry point; constructing Screen starts the blocking render loop.
if __name__ == '__main__':
    Screen(width=800, height=800, stars=600, fps=100)
| mit |
robbiet480/home-assistant | homeassistant/components/thinkingcleaner/sensor.py | 6 | 3836 | """Support for ThinkingCleaner sensors."""
from datetime import timedelta
import logging
from pythinkingcleaner import Discovery, ThinkingCleaner
import voluptuous as vol
from homeassistant import util
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, UNIT_PERCENTAGE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Throttle intervals for polling the devices.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
# sensor type -> [friendly name suffix, unit, icon]
SENSOR_TYPES = {
    "battery": ["Battery", UNIT_PERCENTAGE, "mdi:battery"],
    "state": ["State", None, None],
    "capacity": ["Capacity", None, None],
}
# Device status code -> human-readable state string.
STATES = {
    "st_base": "On homebase: Not Charging",
    "st_base_recon": "On homebase: Reconditioning Charging",
    "st_base_full": "On homebase: Full Charging",
    "st_base_trickle": "On homebase: Trickle Charging",
    "st_base_wait": "On homebase: Waiting",
    "st_plug": "Plugged in: Not Charging",
    "st_plug_recon": "Plugged in: Reconditioning Charging",
    "st_plug_full": "Plugged in: Full Charging",
    "st_plug_trickle": "Plugged in: Trickle Charging",
    "st_plug_wait": "Plugged in: Waiting",
    "st_stopped": "Stopped",
    "st_clean": "Cleaning",
    "st_cleanstop": "Stopped with cleaning",
    "st_clean_spot": "Spot cleaning",
    "st_clean_max": "Max cleaning",
    "st_delayed": "Delayed cleaning will start soon",
    "st_dock": "Searching Homebase",
    "st_pickup": "Roomba picked up",
    "st_remote": "Remote control driving",
    "st_wait": "Waiting for command",
    "st_off": "Off",
    "st_error": "Error",
    "st_locate": "Find me!",
    "st_unknown": "Unknown state",
}
# Host is optional: when omitted, devices are auto-discovered.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_HOST): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ThinkingCleaner platform."""
    host = config.get(CONF_HOST)
    if host:
        # An explicit host was configured; talk to it directly.
        devices = [ThinkingCleaner(host, "unknown")]
    else:
        # No host given: locate vacuums on the local network.
        devices = Discovery().discover()

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_devices():
        """Refresh every discovered device (shared, throttled updater)."""
        for vacuum in devices:
            vacuum.update()

    entities = [
        ThinkingCleanerSensor(vacuum, sensor_type, update_devices)
        for vacuum in devices
        for sensor_type in SENSOR_TYPES
    ]
    add_entities(entities)
class ThinkingCleanerSensor(Entity):
    """Representation of a ThinkingCleaner sensor (battery, state or capacity)."""

    def __init__(self, tc_object, sensor_type, update_devices):
        """Initialize the ThinkingCleaner.

        tc_object: pythinkingcleaner device handle.
        sensor_type: key into SENSOR_TYPES ("battery", "state" or "capacity").
        update_devices: shared, throttled callable refreshing all devices.
        """
        self.type = sensor_type
        self._tc_object = tc_object
        self._update_devices = update_devices
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}"

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return SENSOR_TYPES[self.type][2]

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Update the sensor."""
        self._update_devices()
        if self.type == "battery":
            self._state = self._tc_object.battery
        elif self.type == "state":
            # Fall back to the "unknown" text instead of raising KeyError when
            # the firmware reports a status code missing from the STATES table.
            self._state = STATES.get(self._tc_object.status, STATES["st_unknown"])
        elif self.type == "capacity":
            self._state = self._tc_object.capacity
| apache-2.0 |
alexforencich/xfcp | lib/eth/example/DE2-115/fpga/tb/fpga_core/test_fpga_core.py | 2 | 7452 | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.triggers import RisingEdge, Timer
from cocotbext.eth import GmiiFrame, RgmiiPhy
class TB:
    """Testbench harness: attaches RGMII PHY models to both DUT interfaces
    and drives the clk / clk90 clock pair."""

    def __init__(self, dut, speed=1000e6):
        self.dut = dut
        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)
        # One RGMII PHY model per MAC interface of the DUT.
        self.rgmii_phy0 = RgmiiPhy(dut.phy0_txd, dut.phy0_tx_ctl, dut.phy0_tx_clk,
                                   dut.phy0_rxd, dut.phy0_rx_ctl, dut.phy0_rx_clk, speed=speed)
        self.rgmii_phy1 = RgmiiPhy(dut.phy1_txd, dut.phy1_tx_ctl, dut.phy1_tx_clk,
                                   dut.phy1_rxd, dut.phy1_rx_ctl, dut.phy1_rx_clk, speed=speed)
        # Park the remaining inputs at known values before simulation time starts.
        dut.phy0_int_n.setimmediatevalue(1)
        dut.phy1_int_n.setimmediatevalue(1)
        dut.btn.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.clk.setimmediatevalue(0)
        dut.clk90.setimmediatevalue(0)
        # Start the free-running clock generator coroutine.
        cocotb.fork(self._run_clk())

    async def init(self):
        """Pulse the active-high reset for 10 clock cycles."""
        self.dut.rst.setimmediatevalue(0)
        for k in range(10):
            await RisingEdge(self.dut.clk)
        self.dut.rst <= 1
        for k in range(10):
            await RisingEdge(self.dut.clk)
        self.dut.rst <= 0

    async def _run_clk(self):
        # 8 ns period (125 MHz); clk90 lags clk by a quarter period (2 ns),
        # i.e. a 90-degree phase shift, as required by the RGMII interface.
        t = Timer(2, 'ns')
        while True:
            self.dut.clk <= 1
            await t
            self.dut.clk90 <= 1
            await t
            self.dut.clk <= 0
            await t
            self.dut.clk90 <= 0
            await t
@cocotb.test()
async def run_test(dut):
    """Send a UDP packet to the DUT and check that it ARPs for the sender,
    then echoes the UDP payload back with addresses/ports swapped."""
    tb = TB(dut)
    await tb.init()
    tb.log.info("test UDP RX packet")
    payload = bytes([x % 256 for x in range(256)])
    eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00')
    ip = IP(src='192.168.1.100', dst='192.168.1.128')
    udp = UDP(sport=5678, dport=1234)
    test_pkt = eth / ip / udp / payload
    test_frame = GmiiFrame.from_payload(test_pkt.build())
    await tb.rgmii_phy0.rx.send(test_frame)
    # The DUT does not yet know the sender's MAC, so it must first broadcast
    # an ARP request for the source IP of the packet it received.
    tb.log.info("receive ARP request")
    rx_frame = await tb.rgmii_phy0.tx.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))
    tb.log.info("RX packet: %s", repr(rx_pkt))
    assert rx_pkt.dst == 'ff:ff:ff:ff:ff:ff'
    assert rx_pkt.src == test_pkt.dst
    assert rx_pkt[ARP].hwtype == 1
    assert rx_pkt[ARP].ptype == 0x0800
    assert rx_pkt[ARP].hwlen == 6
    assert rx_pkt[ARP].plen == 4
    assert rx_pkt[ARP].op == 1
    assert rx_pkt[ARP].hwsrc == test_pkt.dst
    assert rx_pkt[ARP].psrc == test_pkt[IP].dst
    assert rx_pkt[ARP].hwdst == '00:00:00:00:00:00'
    assert rx_pkt[ARP].pdst == test_pkt[IP].src
    # Answer the ARP request so the DUT can resolve the destination MAC.
    tb.log.info("send ARP response")
    eth = Ether(src=test_pkt.src, dst=test_pkt.dst)
    arp = ARP(hwtype=1, ptype=0x0800, hwlen=6, plen=4, op=2,
              hwsrc=test_pkt.src, psrc=test_pkt[IP].src,
              hwdst=test_pkt.dst, pdst=test_pkt[IP].dst)
    resp_pkt = eth / arp
    resp_frame = GmiiFrame.from_payload(resp_pkt.build())
    await tb.rgmii_phy0.rx.send(resp_frame)
    # The DUT should now loop the original UDP payload back to the sender
    # with source/destination addresses and ports swapped.
    tb.log.info("receive UDP packet")
    rx_frame = await tb.rgmii_phy0.tx.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))
    tb.log.info("RX packet: %s", repr(rx_pkt))
    assert rx_pkt.dst == test_pkt.src
    assert rx_pkt.src == test_pkt.dst
    assert rx_pkt[IP].dst == test_pkt[IP].src
    assert rx_pkt[IP].src == test_pkt[IP].dst
    assert rx_pkt[UDP].dport == test_pkt[UDP].sport
    assert rx_pkt[UDP].sport == test_pkt[UDP].dport
    assert rx_pkt[UDP].payload == test_pkt[UDP].payload
    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
# cocotb-test
# Directory layout: this file lives in tb/<name>/; the design RTL is two
# levels up in rtl/, and the vendored eth/axis libraries sit under lib/.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'lib', 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
def test_fpga_core(request):
    """Pytest entry point: assemble the Verilog source list and run the
    cocotb testbench in this module through cocotb-test."""
    dut = "fpga_core"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    # Source files grouped by the directory that contains them; the
    # concatenation order matches the original hand-written list.
    design_files = [f"{dut}.v", "hex_display.v"]
    eth_files = [
        "eth_mac_1g_rgmii_fifo.v", "eth_mac_1g_rgmii.v", "iddr.v", "oddr.v",
        "ssio_ddr_in.v", "rgmii_phy_if.v", "eth_mac_1g.v", "axis_gmii_rx.v",
        "axis_gmii_tx.v", "lfsr.v", "eth_axis_rx.v", "eth_axis_tx.v",
        "udp_complete.v", "udp_checksum_gen.v", "udp.v", "udp_ip_rx.v",
        "udp_ip_tx.v", "ip_complete.v", "ip.v", "ip_eth_rx.v", "ip_eth_tx.v",
        "ip_arb_mux.v", "arp.v", "arp_cache.v", "arp_eth_rx.v", "arp_eth_tx.v",
        "eth_arb_mux.v",
    ]
    axis_files = [
        "arbiter.v", "priority_encoder.v", "axis_fifo.v",
        "axis_async_fifo.v", "axis_async_fifo_adapter.v",
    ]
    verilog_sources = (
        [os.path.join(rtl_dir, name) for name in design_files]
        + [os.path.join(eth_rtl_dir, name) for name in eth_files]
        + [os.path.join(axis_rtl_dir, name) for name in axis_files]
    )

    parameters = {}
    # parameters['A'] = val
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # One build directory per parametrized test; strip brackets so the
    # name is filesystem-friendly.
    sim_build = os.path.join(tests_dir, "sim_build",
                             request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| mit |
nopjmp/SickRage | lib/tornado/httpclient.py | 19 | 27583 | """Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
from __future__ import absolute_import, division, print_function
import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
class HTTPClient(object):
    """A blocking HTTP client.

    Convenience wrapper around `AsyncHTTPClient`: it owns a private
    `.IOLoop` and runs each fetch on it to completion.  Intended for
    scripts and testing; applications that already run an IOLoop should
    use `AsyncHTTPClient` directly instead.

    Typical usage::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop(make_current=False)
        self._async_client = (async_client_class or AsyncHTTPClient)(
            self._io_loop, **kwargs)
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if self._closed:
            return
        self._async_client.close()
        self._io_loop.close()
        self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        return self._io_loop.run_sync(
            functools.partial(self._async_client.fetch, request, **kwargs))
class AsyncHTTPClient(Configurable):
    """An non-blocking HTTP client.

    Example usage::

        def handle_response(response):
            if response.error:
                print("Error: %s" % response.error)
            else:
                print(response.body)

        http_client = AsyncHTTPClient()
        http_client.fetch("http://www.google.com/", handle_response)

    The constructor for this class is magic in several respects: It
    actually creates an instance of an implementation-specific
    subclass, and instances are reused as a kind of pseudo-singleton
    (one per `.IOLoop`). The keyword argument ``force_instance=True``
    can be used to suppress this singleton behavior. Unless
    ``force_instance=True`` is used, no arguments other than
    ``io_loop`` should be passed to the `AsyncHTTPClient` constructor.

    The implementation subclass as well as arguments to its
    constructor can be set with the static method `configure()`

    All `AsyncHTTPClient` implementations support a ``defaults``
    keyword argument, which can be used to set default values for
    `HTTPRequest` attributes.  For example::

        AsyncHTTPClient.configure(
            None, defaults=dict(user_agent="MyUserAgent"))
        # or with force_instance:
        client = AsyncHTTPClient(force_instance=True,
                                 defaults=dict(user_agent="MyUserAgent"))

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    @classmethod
    def configurable_base(cls):
        # Configurable machinery: all implementations share this base, so
        # AsyncHTTPClient() is redirected to whichever subclass is configured.
        return AsyncHTTPClient

    @classmethod
    def configurable_default(cls):
        # Imported lazily to avoid a circular import at module load time.
        from tornado.simple_httpclient import SimpleAsyncHTTPClient
        return SimpleAsyncHTTPClient

    @classmethod
    def _async_clients(cls):
        # Per-subclass cache of instances keyed (weakly) by IOLoop.  The
        # attribute name embeds the class name so subclasses do not share
        # a single dictionary through inheritance.
        attr_name = '_async_client_dict_' + cls.__name__
        if not hasattr(cls, attr_name):
            setattr(cls, attr_name, weakref.WeakKeyDictionary())
        return getattr(cls, attr_name)

    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
        io_loop = io_loop or IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            # Pseudo-singleton: reuse the client already bound to this IOLoop.
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
                                                       **kwargs)
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance

    def initialize(self, io_loop, defaults=None):
        self.io_loop = io_loop
        # Copy the class-level defaults so per-instance overrides do not
        # leak into other clients.
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False

    def close(self):
        """Destroys this HTTP client, freeing any file descriptors used.

        This method is **not needed in normal use** due to the way
        that `AsyncHTTPClient` objects are transparently reused.
        ``close()`` is generally only necessary when either the
        `.IOLoop` is also being closed, or the ``force_instance=True``
        argument was used when creating the `AsyncHTTPClient`.

        No other methods may be called on the `AsyncHTTPClient` after
        ``close()``.
        """
        if self._closed:
            return
        self._closed = True
        if self._instance_cache is not None:
            # Sanity check: this instance must still be the one cached for
            # its IOLoop before we remove the cache entry.
            if self._instance_cache.get(self.io_loop) is not self:
                raise RuntimeError("inconsistent AsyncHTTPClient cache")
            del self._instance_cache[self.io_loop]

    def fetch(self, request, callback=None, raise_error=True, **kwargs):
        """Executes a request, asynchronously returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        This method returns a `.Future` whose result is an
        `HTTPResponse`.  By default, the ``Future`` will raise an
        `HTTPError` if the request returned a non-200 response code
        (other errors may also be raised if the server could not be
        contacted).  Instead, if ``raise_error`` is set to False, the
        response will always be returned regardless of the response
        code.

        If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
        In the callback interface, `HTTPError` is not automatically raised.
        Instead, you must check the response's ``error`` attribute or
        call its `~HTTPResponse.rethrow` method.
        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        else:
            if kwargs:
                raise ValueError("kwargs can't be used if request is an HTTPRequest object")
        # We may modify this (to add Host, Accept-Encoding, etc),
        # so make sure we don't modify the caller's object.  This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        future = TracebackFuture()
        if callback is not None:
            callback = stack_context.wrap(callback)

            def handle_future(future):
                # Bridge the Future result back to the legacy callback
                # interface: errors are delivered via response.error rather
                # than being raised.
                exc = future.exception()
                if isinstance(exc, HTTPError) and exc.response is not None:
                    response = exc.response
                elif exc is not None:
                    # Non-HTTP error (e.g. connection failure): synthesize a
                    # code-599 response carrying the exception.
                    response = HTTPResponse(
                        request, 599, error=exc,
                        request_time=time.time() - request.start_time)
                else:
                    response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)

        def handle_response(response):
            if raise_error and response.error:
                future.set_exception(response.error)
            else:
                future.set_result(response)
        self.fetch_impl(request, handle_response)
        return future

    def fetch_impl(self, request, callback):
        # Implementation hook: subclasses perform the actual request and
        # invoke ``callback`` with an HTTPResponse.
        raise NotImplementedError()

    @classmethod
    def configure(cls, impl, **kwargs):
        """Configures the `AsyncHTTPClient` subclass to use.

        ``AsyncHTTPClient()`` actually creates an instance of a subclass.
        This method may be called with either a class object or the
        fully-qualified name of such a class (or ``None`` to use the default,
        ``SimpleAsyncHTTPClient``)

        If additional keyword arguments are given, they will be passed
        to the constructor of each subclass instance created.  The
        keyword argument ``max_clients`` determines the maximum number
        of simultaneous `~AsyncHTTPClient.fetch()` operations that can
        execute in parallel on each `.IOLoop`.  Additional arguments
        may be supported depending on the implementation class in use.

        Example::

           AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
        """
        super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
    """HTTP client request object."""

    # Default values for HTTPRequest parameters.
    # Merged with the values on the request object by AsyncHTTPClient
    # implementations.
    _DEFAULTS = dict(
        connect_timeout=20.0,
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        decompress_response=True,
        proxy_password='',
        allow_nonstandard_methods=False,
        validate_cert=True)

    def __init__(self, url, method="GET", headers=None, body=None,
                 auth_username=None, auth_password=None, auth_mode=None,
                 connect_timeout=None, request_timeout=None,
                 if_modified_since=None, follow_redirects=None,
                 max_redirects=None, user_agent=None, use_gzip=None,
                 network_interface=None, streaming_callback=None,
                 header_callback=None, prepare_curl_callback=None,
                 proxy_host=None, proxy_port=None, proxy_username=None,
                 proxy_password=None, proxy_auth_mode=None,
                 allow_nonstandard_methods=None, validate_cert=None,
                 ca_certs=None, allow_ipv6=None, client_key=None,
                 client_cert=None, body_producer=None,
                 expect_100_continue=False, decompress_response=None,
                 ssl_options=None):
        r"""All parameters except ``url`` are optional.

        :arg string url: URL to fetch
        :arg string method: HTTP method, e.g. "GET" or "POST"
        :arg headers: Additional HTTP headers to pass on the request
        :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
        :arg body: HTTP request body as a string (byte or unicode; if unicode
           the utf-8 encoding will be used)
        :arg body_producer: Callable used for lazy/asynchronous request bodies.
           It is called with one argument, a ``write`` function, and should
           return a `.Future`.  It should call the write function with new
           data as it becomes available.  The write function returns a
           `.Future` which can be used for flow control.
           Only one of ``body`` and ``body_producer`` may
           be specified.  ``body_producer`` is not supported on
           ``curl_httpclient``.  When using ``body_producer`` it is recommended
           to pass a ``Content-Length`` in the headers as otherwise chunked
           encoding will be used, and many servers do not support chunked
           encoding on requests.  New in Tornado 4.0
        :arg string auth_username: Username for HTTP authentication
        :arg string auth_password: Password for HTTP authentication
        :arg string auth_mode: Authentication mode; default is "basic".
           Allowed values are implementation-defined; ``curl_httpclient``
           supports "basic" and "digest"; ``simple_httpclient`` only supports
           "basic"
        :arg float connect_timeout: Timeout for initial connection in seconds,
           default 20 seconds
        :arg float request_timeout: Timeout for entire request in seconds,
           default 20 seconds
        :arg if_modified_since: Timestamp for ``If-Modified-Since`` header
        :type if_modified_since: `datetime` or `float`
        :arg bool follow_redirects: Should redirects be followed automatically
           or return the 3xx response?  Default True.
        :arg int max_redirects: Limit for ``follow_redirects``, default 5.
        :arg string user_agent: String to send as ``User-Agent`` header
        :arg bool decompress_response: Request a compressed response from
           the server and decompress it after downloading.  Default is True.
           New in Tornado 4.0.
        :arg bool use_gzip: Deprecated alias for ``decompress_response``
           since Tornado 4.0.
        :arg string network_interface: Network interface to use for request.
           ``curl_httpclient`` only; see note below.
        :arg callable streaming_callback: If set, ``streaming_callback`` will
           be run with each chunk of data as it is received, and
           ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
           the final response.
        :arg callable header_callback: If set, ``header_callback`` will
           be run with each header line as it is received (including the
           first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
           containing only ``\r\n``.  All lines include the trailing newline
           characters).  ``HTTPResponse.headers`` will be empty in the final
           response.  This is most useful in conjunction with
           ``streaming_callback``, because it's the only way to get access to
           header data while the request is in progress.
        :arg callable prepare_curl_callback: If set, will be called with
           a ``pycurl.Curl`` object to allow the application to make additional
           ``setopt`` calls.
        :arg string proxy_host: HTTP proxy hostname.  To use proxies,
           ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
           ``proxy_pass`` and ``proxy_auth_mode`` are optional.  Proxies are
           currently only supported with ``curl_httpclient``.
        :arg int proxy_port: HTTP proxy port
        :arg string proxy_username: HTTP proxy username
        :arg string proxy_password: HTTP proxy password
        :arg string proxy_auth_mode: HTTP proxy Authentication mode;
           default is "basic". supports "basic" and "digest"
        :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
           argument? Default is False.
        :arg bool validate_cert: For HTTPS requests, validate the server's
           certificate? Default is True.
        :arg string ca_certs: filename of CA certificates in PEM format,
           or None to use defaults.  See note below when used with
           ``curl_httpclient``.
        :arg string client_key: Filename for client SSL key, if any.  See
           note below when used with ``curl_httpclient``.
        :arg string client_cert: Filename for client SSL certificate, if any.
           See note below when used with ``curl_httpclient``.
        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
           ``simple_httpclient`` (unsupported by ``curl_httpclient``).
           Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
           and ``client_cert``.
        :arg bool allow_ipv6: Use IPv6 when available?  Default is true.
        :arg bool expect_100_continue: If true, send the
           ``Expect: 100-continue`` header and wait for a continue response
           before sending the request body.  Only supported with
           simple_httpclient.

        .. note::

            When using ``curl_httpclient`` certain options may be
            inherited by subsequent fetches because ``pycurl`` does
            not allow them to be cleanly reset.  This applies to the
            ``ca_certs``, ``client_key``, ``client_cert``, and
            ``network_interface`` arguments.  If you use these
            options, you should pass them on every request (you don't
            have to always use the same values, but it's not possible
            to mix requests that specify these options with ones that
            use the defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.

        .. versionadded:: 4.2
           The ``ssl_options`` argument.

        .. versionadded:: 4.5
           The ``proxy_auth_mode`` argument.
        """
        # Note that some of these attributes go through property setters
        # defined below.
        self.headers = headers
        if if_modified_since:
            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                if_modified_since)
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.proxy_auth_mode = proxy_auth_mode
        self.url = url
        self.method = method
        self.body = body
        self.body_producer = body_producer
        self.auth_username = auth_username
        self.auth_password = auth_password
        self.auth_mode = auth_mode
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        # ``decompress_response`` wins over its deprecated alias ``use_gzip``
        # when both are supplied.
        if decompress_response is not None:
            self.decompress_response = decompress_response
        else:
            self.decompress_response = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
        self.allow_ipv6 = allow_ipv6
        self.client_key = client_key
        self.client_cert = client_cert
        self.ssl_options = ssl_options
        self.expect_100_continue = expect_100_continue
        # Recorded so implementations can report request_time on the response.
        self.start_time = time.time()

    @property
    def headers(self):
        return self._headers

    @headers.setter
    def headers(self, value):
        # Normalize None to an empty HTTPHeaders object so callers can
        # always index/assign headers.
        if value is None:
            self._headers = httputil.HTTPHeaders()
        else:
            self._headers = value

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        # Bodies are always stored as utf-8-encoded bytes.
        self._body = utf8(value)

    # The callback properties below wrap their values with
    # stack_context.wrap so the callbacks later run in the caller's
    # exception-handling context.
    @property
    def body_producer(self):
        return self._body_producer

    @body_producer.setter
    def body_producer(self, value):
        self._body_producer = stack_context.wrap(value)

    @property
    def streaming_callback(self):
        return self._streaming_callback

    @streaming_callback.setter
    def streaming_callback(self, value):
        self._streaming_callback = stack_context.wrap(value)

    @property
    def header_callback(self):
        return self._header_callback

    @header_callback.setter
    def header_callback(self, value):
        self._header_callback = stack_context.wrap(value)

    @property
    def prepare_curl_callback(self):
        return self._prepare_curl_callback

    @prepare_curl_callback.setter
    def prepare_curl_callback(self, value):
        self._prepare_curl_callback = stack_context.wrap(value)
class HTTPResponse(object):
    """HTTP Response object.

    Attributes:

    * request: HTTPRequest object
    * code: numeric HTTP status code, e.g. 200 or 404
    * reason: human-readable reason phrase describing the status code
    * headers: `tornado.httputil.HTTPHeaders` object
    * effective_url: final location of the resource after following any
      redirects
    * buffer: ``cStringIO`` object for response body
    * body: response body as bytes (created on demand from ``self.buffer``)
    * error: Exception object, if any
    * request_time: seconds from request start to finish
    * time_info: dictionary of diagnostic timing information from the request.
      Available data are subject to change, but currently uses timings
      available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
      plus ``queue``, which is the delay (if any) introduced by waiting for
      a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
    """
    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, request_time=None,
                 time_info=None, reason=None):
        # Unwrap the internal default-applying proxy so user code always
        # sees the original HTTPRequest.
        if isinstance(request, _RequestProxy):
            request = request.request
        self.request = request
        self.code = code
        self.reason = reason or httputil.responses.get(code, "Unknown")
        self.headers = headers if headers is not None else httputil.HTTPHeaders()
        self.buffer = buffer
        self._body = None  # lazily materialized from self.buffer
        self.effective_url = request.url if effective_url is None else effective_url
        if error is not None:
            self.error = error
        elif 200 <= self.code < 300:
            self.error = None
        else:
            # Synthesize an HTTPError for any non-2xx status when the
            # caller did not supply an explicit error.
            self.error = HTTPError(self.code, message=self.reason,
                                   response=self)
        self.request_time = request_time
        self.time_info = time_info or {}

    @property
    def body(self):
        if self.buffer is None:
            return None
        if self._body is None:
            self._body = self.buffer.getvalue()
        return self._body

    def rethrow(self):
        """If there was an error on the request, raise an `HTTPError`."""
        if self.error:
            raise self.error

    def __repr__(self):
        args = ",".join("%s=%r" % pair
                        for pair in sorted(self.__dict__.items()))
        return "%s(%s)" % (self.__class__.__name__, args)
class HTTPError(Exception):
    """Exception thrown for an unsuccessful HTTP request.

    Attributes:

    * ``code`` - HTTP integer error code, e.g. 404.  Code 599 is used
      when no HTTP response was received at all, e.g. for a timeout.
    * ``response`` - the `HTTPResponse` object, if any.

    Note that if ``follow_redirects`` is False, redirects become
    HTTPErrors, and ``error.response.headers['Location']`` gives the
    destination of the redirect.
    """
    def __init__(self, code, message=None, response=None):
        self.code = code
        self.message = message or httputil.responses.get(code, "Unknown")
        self.response = response
        super(HTTPError, self).__init__(code, message, response)

    def __str__(self):
        return "HTTP {0}: {1}".format(self.code, self.message)

    # self.response holds a reference back to this error, which defeats
    # the default __repr__ (recursion, especially on pypy which lacks
    # cpython's recursion detection), so reuse __str__ for repr too.
    __repr__ = __str__
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(self, request, defaults):
self.request = request
self.defaults = defaults
def __getattr__(self, name):
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main():
    """Minimal command-line HTTP client: fetch each URL given as an argument
    and print its headers and/or body according to the options."""
    from tornado.options import define, options, parse_command_line
    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    args = parse_command_line()
    client = HTTPClient()
    for url in args:
        try:
            response = client.fetch(
                url,
                follow_redirects=options.follow_redirects,
                validate_cert=options.validate_cert,
            )
        except HTTPError as e:
            # Non-2xx responses still carry a response object worth printing;
            # anything without one (e.g. connection failure) is re-raised.
            if e.response is None:
                raise
            response = e.response
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(native_str(response.body))
    client.close()
# Allow "python -m tornado.httpclient <url> ..." style invocation.
if __name__ == "__main__":
    main()
| gpl-3.0 |
icgood/pymap | pymap/plugin.py | 1 | 3579 |
from __future__ import annotations
from collections.abc import Callable, Iterable, Iterator, Mapping
from typing import TypeVar, Generic, Optional, Final
from pkg_resources import iter_entry_points, DistributionNotFound
__all__ = ['PluginT', 'Plugin']

#: The plugin type variable: the type of the values held by a Plugin registry.
PluginT = TypeVar('PluginT')
class Plugin(Generic[PluginT], Iterable[tuple[str, PluginT]]):
    """Plugin system, typically loaded from :mod:`pkg_resources` `entry points
    <https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata>`_.

        >>> example: Plugin[type[Example]] = Plugin('plugins.example')
        >>> example.add('two', ExampleTwo)
        >>> example.registered
        {'one': <class 'examples.ExampleOne'>,
         'two': <class 'examples.ExampleTwo'>}

    Note:
        Plugins registered from *group* entry points are lazy-loaded. This is
        to avoid cyclic imports.

    Args:
        group: The entry point group to load.

    """

    def __init__(self, group: str, *, default: Optional[str] = None) -> None:
        super().__init__()
        self.group: Final = group
        # Name of the default plugin; may be reassigned via the `default` setter.
        self._default = default
        # Entry-point plugins, loaded on first access (None until then).
        self._loaded: Optional[dict[str, PluginT]] = None
        # Plugins registered programmatically via add()/register().
        self._added: dict[str, PluginT] = {}

    def __iter__(self) -> Iterator[tuple[str, PluginT]]:
        return iter(self.registered.items())

    @property
    def registered(self) -> Mapping[str, PluginT]:
        """A mapping of the registered plugins, keyed by name."""
        loaded = self._load()
        # Programmatically added plugins shadow entry-point plugins of the
        # same name.
        return {**loaded, **self._added}

    @property
    def default(self) -> PluginT:
        """The default plugin implementation.

        This property may also be assigned a new string value to change the
        name of the default plugin.

            >>> example: Plugin[type[Example]] = Plugin('plugins.example',
            ...                                         default='one')
            >>> example.default
            <class 'examples.ExampleOne'>
            >>> example.default = 'two'
            >>> example.default
            <class 'examples.ExampleTwo'>

        Raises:
            KeyError: The default plugin name was not registered.

        """
        if self._default is None:
            raise KeyError(f'{self.group!r} has no default plugin')
        else:
            return self.registered[self._default]

    @default.setter
    def default(self, default: Optional[str]) -> None:
        self._default = default

    def _load(self) -> Mapping[str, PluginT]:
        # Lazily resolve the entry points exactly once; the result is cached
        # in self._loaded for subsequent calls.
        loaded = self._loaded
        if loaded is None:
            loaded = {}
            for entry_point in iter_entry_points(self.group):
                try:
                    plugin: PluginT = entry_point.load()
                except DistributionNotFound:
                    pass  # optional dependencies not installed
                else:
                    loaded[entry_point.name] = plugin
            self._loaded = loaded
        return loaded

    def add(self, name: str, plugin: PluginT) -> None:
        """Add a new plugin by name.

        Args:
            name: The identifying name of the plugin.
            plugin: The plugin object.

        """
        self._added[name] = plugin

    def register(self, name: str) -> Callable[[PluginT], PluginT]:
        """Decorates a plugin implementation.

        Args:
            name: The identifying name of the plugin.

        """
        def deco(plugin: PluginT) -> PluginT:
            self.add(name, plugin)
            return plugin
        return deco

    def __repr__(self) -> str:
        return f'Plugin({self.group!r})'
TakayukiSakai/tensorflow | tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py | 19 | 2674 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
  # NOTE(mrry): These tests are excluded from the TSAN TAP target because
  # they contain benign, deliberate data races: many threads update the
  # same parameters concurrently without a lock.

  def testParallelUpdateWithoutLocking(self):
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], 1.0)
      var = tf.Variable(tf.zeros([1024, 1024]))
      add_ops = [tf.assign_add(var, ones_t, use_locking=False)
                 for _ in range(20)]
      tf.initialize_all_variables().run()

      # One thread per add op, all racing on the same variable.
      threads = [self.checkedThread(target=sess.run, args=(add_op,))
                 for add_op in add_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      result = var.eval()
      ones = np.ones((1024, 1024)).astype(np.float32)
      # Every element received at least one and at most twenty increments.
      self.assertTrue((result >= ones).all())
      self.assertTrue((result <= ones * 20).all())

  def testParallelAssignWithoutLocking(self):
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], float(1))
      var = tf.Variable(tf.zeros([1024, 1024]))
      assign_ops = [tf.assign(var, tf.mul(ones_t, float(i)), False)
                    for i in range(1, 21)]
      tf.initialize_all_variables().run()

      # One thread per assign op, all racing on the same variable.
      threads = [self.checkedThread(target=sess.run, args=(assign_op,))
                 for assign_op in assign_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      result = var.eval()
      # Assert every element is taken from one of the assignments.
      self.assertTrue((result > 0).all())
      self.assertTrue((result <= 20).all())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.