code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.precise import Precise
class bigone(Exchange):
def describe(self):
    """
    Return the static description of the BigONE exchange: capabilities,
    endpoints, fees, timeframe codes and error-code mappings, merged over
    the base Exchange description.
    """
    return self.deep_extend(super(bigone, self).describe(), {
        'id': 'bigone',
        'name': 'BigONE',
        'countries': ['CN'],
        'version': 'v3',
        'rateLimit': 1200,  # 500 request per 10 minutes
        # unified methods implemented by this adapter
        'has': {
            'cancelAllOrders': True,
            'cancelOrder': True,
            'createOrder': True,
            'fetchBalance': True,
            'fetchClosedOrders': True,
            'fetchDepositAddress': True,
            'fetchDeposits': True,
            'fetchMarkets': True,
            'fetchMyTrades': True,
            'fetchOHLCV': True,
            'fetchOpenOrders': True,
            'fetchOrder': True,
            'fetchOrders': True,
            'fetchOrderBook': True,
            'fetchTicker': True,
            'fetchTickers': True,
            'fetchTime': True,
            'fetchTrades': True,
            'fetchWithdrawals': True,
            'withdraw': True,
        },
        # unified timeframe -> exchange "period" code
        'timeframes': {
            '1m': 'min1',
            '5m': 'min5',
            '15m': 'min15',
            '30m': 'min30',
            '1h': 'hour1',
            '3h': 'hour3',
            '4h': 'hour4',
            '6h': 'hour6',
            '12h': 'hour12',
            '1d': 'day1',
            '1w': 'week1',
            '1M': 'month1',
        },
        'hostname': 'big.one',  # or 'bigone.com'
        'urls': {
            'logo': 'https://user-images.githubusercontent.com/1294454/69354403-1d532180-0c91-11ea-88ed-44c06cefdf87.jpg',
            'api': {
                'public': 'https://{hostname}/api/v3',
                'private': 'https://{hostname}/api/v3/viewer',
            },
            'www': 'https://big.one',
            'doc': 'https://open.big.one/docs/api.html',
            'fees': 'https://bigone.zendesk.com/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
            'referral': 'https://b1.run/users/new?code=D3LLBVFT',
        },
        # endpoint paths; names in {braces} are interpolated by sign()
        'api': {
            'public': {
                'get': [
                    'ping',
                    'asset_pairs',
                    'asset_pairs/{asset_pair_name}/depth',
                    'asset_pairs/{asset_pair_name}/trades',
                    'asset_pairs/{asset_pair_name}/ticker',
                    'asset_pairs/{asset_pair_name}/candles',
                    'asset_pairs/tickers',
                ],
            },
            'private': {
                'get': [
                    'accounts',
                    'fund/accounts',
                    'assets/{asset_symbol}/address',
                    'orders',
                    'orders/{id}',
                    'orders/multi',
                    'trades',
                    'withdrawals',
                    'deposits',
                ],
                'post': [
                    'orders',
                    'orders/{id}/cancel',
                    'orders/cancel',
                    'withdrawals',
                    'transfer',
                ],
            },
        },
        'fees': {
            'trading': {
                'maker': 0.1 / 100,
                'taker': 0.1 / 100,
            },
            'funding': {
                # HARDCODING IS DEPRECATED THE FEES BELOW ARE TO BE REMOVED SOON
                'withdraw': {
                    'BTC': 0.001,
                    'ETH': 0.005,
                    'EOS': 0.01,
                    'ZEC': 0.003,
                    'LTC': 0.01,
                    'QTUM': 0.01,
                    # 'INK': 0.01 QTUM,
                    # 'BOT': 0.01 QTUM,
                    'ETC': 0.01,
                    'GAS': 0.0,
                    'BTS': 1.0,
                    'GXS': 0.1,
                    'BITCNY': 19.0,
                },
            },
        },
        # numeric API error codes (and one literal message) -> ccxt exception classes,
        # consumed by handle_errors()
        'exceptions': {
            'exact': {
                '10001': BadRequest,  # syntax error
                '10005': ExchangeError,  # internal error
                "Amount's scale must greater than AssetPair's base scale": InvalidOrder,
                '10007': BadRequest,  # parameter error, {"code":10007,"message":"Amount's scale must greater than AssetPair's base scale"}
                '10011': ExchangeError,  # system error
                '10013': OrderNotFound,  # {"code":10013,"message":"Resource not found"}
                '10014': InsufficientFunds,  # {"code":10014,"message":"Insufficient funds"}
                '10403': PermissionDenied,  # permission denied
                '10429': RateLimitExceeded,  # too many requests
                '40004': AuthenticationError,  # {"code":40004,"message":"invalid jwt"}
                '40103': AuthenticationError,  # invalid otp code
                '40104': AuthenticationError,  # invalid asset pin code
                '40301': PermissionDenied,  # {"code":40301,"message":"Permission denied withdrawal create"}
                '40302': ExchangeError,  # already requested
                '40601': ExchangeError,  # resource is locked
                '40602': ExchangeError,  # resource is depleted
                '40603': InsufficientFunds,  # insufficient resource
                '40120': InvalidOrder,  # Order is in trading
                '40121': InvalidOrder,  # Order is already cancelled or filled
            },
            'broad': {
            },
        },
        # exchange tickers that collide with other projects' symbols
        'commonCurrencies': {
            'MBN': 'Mobilian Coin',
            'ONE': 'BigONE Token',
        },
    })
def fetch_markets(self, params={}):
    """
    Fetch all trading pairs from GET /asset_pairs and return a list of
    unified market structures.

    :param dict params: extra parameters passed through to the endpoint
    :returns list: unified market structures
    """
    response = self.publicGetAssetPairs(params)
    #
    #     {
    #         "code":0,
    #         "data":[
    #             {
    #                 "id":"01e48809-b42f-4a38-96b1-c4c547365db1",
    #                 "name":"PCX-BTC",
    #                 "quote_scale":7,
    #                 "quote_asset":{
    #                     "id":"0df9c3c3-255a-46d7-ab82-dedae169fba9",
    #                     "symbol":"BTC",
    #                     "name":"Bitcoin",
    #                 },
    #                 "base_asset":{
    #                     "id":"405484f7-4b03-4378-a9c1-2bd718ecab51",
    #                     "symbol":"PCX",
    #                     "name":"ChainX",
    #                 },
    #                 "base_scale":3,
    #                 "min_quote_value":"0.0001",
    #             },
    #         ]
    #     }
    #
    markets = self.safe_value(response, 'data', [])
    result = []
    for i in range(0, len(markets)):
        market = markets[i]
        id = self.safe_string(market, 'name')
        uuid = self.safe_string(market, 'id')
        baseAsset = self.safe_value(market, 'base_asset', {})
        quoteAsset = self.safe_value(market, 'quote_asset', {})
        baseId = self.safe_string(baseAsset, 'symbol')
        quoteId = self.safe_string(quoteAsset, 'symbol')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        amountPrecisionString = self.safe_string(market, 'base_scale')
        pricePrecisionString = self.safe_string(market, 'quote_scale')
        # parse_precision turns a decimal-places count into the smallest
        # representable increment, used as the min amount/price limit
        amountLimit = self.parse_precision(amountPrecisionString)
        priceLimit = self.parse_precision(pricePrecisionString)
        precision = {
            'amount': int(amountPrecisionString),
            'price': int(pricePrecisionString),
        }
        # BUGFIX: "min_quote_value" is a decimal string such as "0.0001";
        # the previous safe_integer() call could not parse it - use
        # safe_number so limits['cost']['min'] is a proper float
        minCost = self.safe_number(market, 'min_quote_value')
        entry = {
            'id': id,
            'uuid': uuid,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'active': True,
            'precision': precision,
            'limits': {
                'amount': {
                    'min': self.parse_number(amountLimit),
                    'max': None,
                },
                'price': {
                    'min': self.parse_number(priceLimit),
                    'max': None,
                },
                'cost': {
                    'min': minCost,
                    'max': None,
                },
            },
            'info': market,
        }
        result.append(entry)
    return result
def load_markets(self, reload=False, params={}):
    """
    Load markets via the base implementation, then build (or rebuild on
    reload) a uuid-keyed market lookup table cached in
    self.options['marketsByUuid'].
    """
    markets = super(bigone, self).load_markets(reload, params)
    byUuid = self.safe_value(self.options, 'marketsByUuid')
    if reload or (byUuid is None):
        byUuid = {}
        for symbol in self.symbols:
            entry = self.markets[symbol]
            byUuid[self.safe_string(entry, 'uuid')] = entry
        self.options['marketsByUuid'] = byUuid
    return markets
def parse_ticker(self, ticker, market=None):
    """
    Convert a raw BigONE ticker payload into a unified ticker structure.

    :param dict ticker: raw payload as shown below
    :param dict market: optional unified market used to resolve the symbol
    :returns dict: unified ticker; timestamp is None because the API does
        not include one in this payload
    """
    #
    #     {
    #         "asset_pair_name":"ETH-BTC",
    #         "bid":{"price":"0.021593","order_count":1,"quantity":"0.20936"},
    #         "ask":{"price":"0.021613","order_count":1,"quantity":"2.87064"},
    #         "open":"0.021795",
    #         "high":"0.021795",
    #         "low":"0.021471",
    #         "close":"0.021613",
    #         "volume":"117078.90431",
    #         "daily_change":"-0.000182"
    #     }
    #
    marketId = self.safe_string(ticker, 'asset_pair_name')
    symbol = self.safe_symbol(marketId, market, '-')
    timestamp = None
    close = self.safe_number(ticker, 'close')
    bid = self.safe_value(ticker, 'bid', {})
    ask = self.safe_value(ticker, 'ask', {})
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_number(ticker, 'high'),
        'low': self.safe_number(ticker, 'low'),
        'bid': self.safe_number(bid, 'price'),
        'bidVolume': self.safe_number(bid, 'quantity'),
        'ask': self.safe_number(ask, 'price'),
        'askVolume': self.safe_number(ask, 'quantity'),
        'vwap': None,
        'open': self.safe_number(ticker, 'open'),
        'close': close,
        'last': close,  # the API has no separate "last" field
        'previousClose': None,
        'change': self.safe_number(ticker, 'daily_change'),
        'percentage': None,
        'average': None,
        'baseVolume': self.safe_number(ticker, 'volume'),
        'quoteVolume': None,
        'info': ticker,
    }
def fetch_ticker(self, symbol, params={}):
    """
    Fetch the ticker for a single symbol from
    GET /asset_pairs/{asset_pair_name}/ticker.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'asset_pair_name': market['id'],
    }
    response = self.publicGetAssetPairsAssetPairNameTicker(self.extend(request, params))
    #
    #     {
    #         "code":0,
    #         "data":{
    #             "asset_pair_name":"ETH-BTC",
    #             "bid":{"price":"0.021593","order_count":1,"quantity":"0.20936"},
    #             "ask":{"price":"0.021613","order_count":1,"quantity":"2.87064"},
    #             "open":"0.021795",
    #             "high":"0.021795",
    #             "low":"0.021471",
    #             "close":"0.021613",
    #             "volume":"117078.90431",
    #             "daily_change":"-0.000182"
    #         }
    #     }
    #
    ticker = self.safe_value(response, 'data', {})
    return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
    """
    Fetch tickers for several (or all) symbols from GET /asset_pairs/tickers.

    :param list symbols: optional unified symbols; when given they are sent
        as a comma-separated "pair_names" filter and also used to filter the
        parsed result
    """
    self.load_markets()
    request = {}
    if symbols is not None:
        ids = self.market_ids(symbols)
        request['pair_names'] = ','.join(ids)
    response = self.publicGetAssetPairsTickers(self.extend(request, params))
    #
    #     {
    #         "code":0,
    #         "data":[
    #             {
    #                 "asset_pair_name":"PCX-BTC",
    #                 "bid":{"price":"0.000234","order_count":1,"quantity":"0.518"},
    #                 "ask":{"price":"0.0002348","order_count":1,"quantity":"2.348"},
    #                 "open":"0.0002343",
    #                 "high":"0.0002348",
    #                 "low":"0.0002162",
    #                 "close":"0.0002348",
    #                 "volume":"12887.016",
    #                 "daily_change":"0.0000005"
    #             },
    #             {
    #                 "asset_pair_name":"GXC-USDT",
    #                 "bid":{"price":"0.5054","order_count":1,"quantity":"40.53"},
    #                 "ask":{"price":"0.5055","order_count":1,"quantity":"38.53"},
    #                 "open":"0.5262",
    #                 "high":"0.5323",
    #                 "low":"0.5055",
    #                 "close":"0.5055",
    #                 "volume":"603963.05",
    #                 "daily_change":"-0.0207"
    #             }
    #         ]
    #     }
    #
    tickers = self.safe_value(response, 'data', [])
    result = {}
    for i in range(0, len(tickers)):
        ticker = self.parse_ticker(tickers[i])
        symbol = ticker['symbol']
        result[symbol] = ticker
    return self.filter_by_array(result, 'symbol', symbols)
def fetch_time(self, params={}):
    """
    Fetch the current server time via GET /ping.

    The API reports nanoseconds, e.g.:

        {"data": {"timestamp": 1527665262168391000}}

    :returns int: server time in milliseconds
    """
    response = self.publicGetPing(params)
    data = self.safe_value(response, 'data', {})
    nanoseconds = self.safe_integer(data, 'timestamp')
    # convert nanoseconds to milliseconds
    return int(nanoseconds / 1000000)
def fetch_order_book(self, symbol, limit=None, params={}):
    """
    Fetch the order book for a symbol from
    GET /asset_pairs/{asset_pair_name}/depth.

    :param int limit: number of levels per side (default 50, max 200)
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'asset_pair_name': market['id'],
    }
    if limit is not None:
        request['limit'] = limit  # default 50, max 200
    response = self.publicGetAssetPairsAssetPairNameDepth(self.extend(request, params))
    #
    #     {
    #         "code":0,
    #         "data": {
    #             "asset_pair_name": "EOS-BTC",
    #             "bids": [
    #                 {"price": "42", "order_count": 4, "quantity": "23.33363711"}
    #             ],
    #             "asks": [
    #                 {"price": "45", "order_count": 2, "quantity": "4193.3283464"}
    #             ]
    #         }
    #     }
    #
    orderbook = self.safe_value(response, 'data', {})
    # levels are dicts, hence the explicit 'price'/'quantity' field names
    return self.parse_order_book(orderbook, symbol, None, 'bids', 'asks', 'price', 'quantity')
def parse_trade(self, trade, market=None):
    """
    Convert a raw public or private trade into a unified trade structure.

    Public trades carry "created_at"; private trades carry "inserted_at",
    order ids, fees and a "side" field that may be "SELF_TRADING" when both
    orders belong to the same account.
    """
    #
    # fetchTrades(public)
    #
    #     {
    #         "id": 38199941,
    #         "price": "3378.67",
    #         "amount": "0.019812",
    #         "taker_side": "ASK",
    #         "created_at": "2019-01-29T06:05:56Z"
    #     }
    #
    # fetchMyTrades(private)
    #
    #     {
    #         "id": 10854280,
    #         "asset_pair_name": "XIN-USDT",
    #         "price": "70",
    #         "amount": "1",
    #         "taker_side": "ASK",
    #         "maker_order_id": 58284908,
    #         "taker_order_id": 58284909,
    #         "maker_fee": "0.0008",
    #         "taker_fee": "0.07",
    #         "side": "SELF_TRADING",
    #         "inserted_at": "2019-04-16T12:00:01Z"
    #     },
    #
    #     {
    #         "id": 10854263,
    #         "asset_pair_name": "XIN-USDT",
    #         "price": "75.7",
    #         "amount": "12.743149",
    #         "taker_side": "BID",
    #         "maker_order_id": null,
    #         "taker_order_id": 58284888,
    #         "maker_fee": null,
    #         "taker_fee": "0.0025486298",
    #         "side": "BID",
    #         "inserted_at": "2019-04-15T06:20:57Z"
    #     }
    #
    timestamp = self.parse8601(self.safe_string_2(trade, 'created_at', 'inserted_at'))
    priceString = self.safe_string(trade, 'price')
    amountString = self.safe_string(trade, 'amount')
    price = self.parse_number(priceString)
    amount = self.parse_number(amountString)
    # cost computed with string arithmetic to avoid float rounding
    cost = self.parse_number(Precise.string_mul(priceString, amountString))
    marketId = self.safe_string(trade, 'asset_pair_name')
    symbol = self.safe_symbol(marketId, market, '-')
    side = self.safe_string(trade, 'side')
    takerSide = self.safe_string(trade, 'taker_side')
    takerOrMaker = None
    # takerOrMaker is only decidable for private trades that are not
    # self-trades: our side matching the taker side means we were the taker
    if (takerSide is not None) and (side is not None) and (side != 'SELF_TRADING'):
        takerOrMaker = 'taker' if (takerSide == side) else 'maker'
    if side is None:
        # taker side is not related to buy/sell side
        # the following code is probably a mistake
        side = 'sell' if (takerSide == 'ASK') else 'buy'
    else:
        if side == 'BID':
            side = 'buy'
        elif side == 'ASK':
            side = 'sell'
    makerOrderId = self.safe_string(trade, 'maker_order_id')
    takerOrderId = self.safe_string(trade, 'taker_order_id')
    orderId = None
    # a self-trade references both of our orders, hence the list form
    if makerOrderId is not None:
        if takerOrderId is not None:
            orderId = [makerOrderId, takerOrderId]
        else:
            orderId = makerOrderId
    elif takerOrderId is not None:
        orderId = takerOrderId
    id = self.safe_string(trade, 'id')
    result = {
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'order': orderId,
        'type': 'limit',
        'side': side,
        'takerOrMaker': takerOrMaker,
        'price': price,
        'amount': amount,
        'cost': float(cost),
        'info': trade,
    }
    # fee currency depends on trade direction and our role:
    # a maker buy earns base, a taker buy pays quote, and vice versa
    makerCurrencyCode = None
    takerCurrencyCode = None
    if (market is not None) and (takerOrMaker is not None):
        if side == 'buy':
            if takerOrMaker == 'maker':
                makerCurrencyCode = market['base']
                takerCurrencyCode = market['quote']
            else:
                makerCurrencyCode = market['quote']
                takerCurrencyCode = market['base']
        else:
            if takerOrMaker == 'maker':
                makerCurrencyCode = market['quote']
                takerCurrencyCode = market['base']
            else:
                makerCurrencyCode = market['base']
                takerCurrencyCode = market['quote']
    elif side == 'SELF_TRADING':
        # NOTE(review): side has already been remapped to buy/sell above,
        # so this branch only triggers when the raw side was SELF_TRADING
        # and takerOrMaker stayed None; assumes market is not None here
        if takerSide == 'BID':
            makerCurrencyCode = market['quote']
            takerCurrencyCode = market['base']
        elif takerSide == 'ASK':
            makerCurrencyCode = market['base']
            takerCurrencyCode = market['quote']
    makerFeeCost = self.safe_number(trade, 'maker_fee')
    takerFeeCost = self.safe_number(trade, 'taker_fee')
    # a self-trade pays both fees, a normal trade pays exactly one
    if makerFeeCost is not None:
        if takerFeeCost is not None:
            result['fees'] = [
                {'cost': makerFeeCost, 'currency': makerCurrencyCode},
                {'cost': takerFeeCost, 'currency': takerCurrencyCode},
            ]
        else:
            result['fee'] = {'cost': makerFeeCost, 'currency': makerCurrencyCode}
    elif takerFeeCost is not None:
        result['fee'] = {'cost': takerFeeCost, 'currency': takerCurrencyCode}
    else:
        result['fee'] = None
    return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """
    Fetch recent public trades for a symbol from
    GET /asset_pairs/{asset_pair_name}/trades.

    NOTE: the endpoint ignores "since"/"limit"; they are only applied
    client-side by parse_trades.
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'asset_pair_name': market['id'],
    }
    response = self.publicGetAssetPairsAssetPairNameTrades(self.extend(request, params))
    #
    #     {
    #         "code": 0,
    #         "data": [
    #             {
    #                 "id": 38199941,
    #                 "price": "3378.67",
    #                 "amount": "0.019812",
    #                 "taker_side": "ASK",
    #                 "created_at": "2019-01-29T06:05:56Z"
    #             },
    #             {
    #                 "id": 38199934,
    #                 "price": "3376.14",
    #                 "amount": "0.019384",
    #                 "taker_side": "ASK",
    #                 "created_at": "2019-01-29T06:05:40Z"
    #             }
    #         ]
    #     }
    #
    trades = self.safe_value(response, 'data', [])
    return self.parse_trades(trades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
    """
    Convert a raw candle into the unified OHLCV list
    [timestamp, open, high, low, close, volume].

    Raw format:

        {
            close: '0.021562',
            high: '0.021563',
            low: '0.02156',
            open: '0.021563',
            time: '2019-11-21T07:54:00Z',
            volume: '59.84376'
        }
    """
    timestamp = self.parse8601(self.safe_string(ohlcv, 'time'))
    fields = ('open', 'high', 'low', 'close', 'volume')
    return [timestamp] + [self.safe_number(ohlcv, field) for field in fields]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """
    Fetch candles from GET /asset_pairs/{asset_pair_name}/candles.

    The endpoint returns candles *before* the "time" parameter, so when
    "since" is given the end time is computed as since + limit * timeframe.
    """
    self.load_markets()
    market = self.market(symbol)
    if limit is None:
        limit = 100  # default 100, max 500
    request = {
        'asset_pair_name': market['id'],
        'period': self.timeframes[timeframe],
        'limit': limit,
    }
    if since is not None:
        # start = int(since / 1000)
        # the exchange returns candles ending at "time", so push the end
        # forward by limit periods to cover the requested window
        end = self.sum(since, limit * self.parse_timeframe(timeframe) * 1000)
        request['time'] = self.iso8601(end)
    response = self.publicGetAssetPairsAssetPairNameCandles(self.extend(request, params))
    #
    #     {
    #         code: 0,
    #         data: [
    #             {
    #                 close: '0.021656',
    #                 high: '0.021658',
    #                 low: '0.021652',
    #                 open: '0.021652',
    #                 time: '2019-11-21T09:30:00Z',
    #                 volume: '53.08664'
    #             },
    #             {
    #                 close: '0.021652',
    #                 high: '0.021656',
    #                 low: '0.021652',
    #                 open: '0.021656',
    #                 time: '2019-11-21T09:29:00Z',
    #                 volume: '88.39861'
    #             },
    #         ]
    #     }
    #
    data = self.safe_value(response, 'data', [])
    return self.parse_ohlcvs(data, market, timeframe, since, limit)
def fetch_balance(self, params={}):
    """
    Fetch account balances via GET /accounts (spot) or GET /fund/accounts.

    :param dict params: pass {'type': 'fund'} to query the funding account;
        the empty default selects the spot "accounts" endpoint
    """
    self.load_markets()
    type = self.safe_string(params, 'type', '')
    params = self.omit(params, 'type')
    # resolves to privateGetAccounts or privateGetFundAccounts
    method = 'privateGet' + self.capitalize(type) + 'Accounts'
    response = getattr(self, method)(params)
    #
    #     {
    #         "code":0,
    #         "data":[
    #             {"asset_symbol":"NKC","balance":"0","locked_balance":"0"},
    #             {"asset_symbol":"UBTC","balance":"0","locked_balance":"0"},
    #             {"asset_symbol":"READ","balance":"0","locked_balance":"0"},
    #         ],
    #     }
    #
    result = {
        'info': response,
        'timestamp': None,
        'datetime': None,
    }
    balances = self.safe_value(response, 'data', [])
    for i in range(0, len(balances)):
        balance = balances[i]
        symbol = self.safe_string(balance, 'asset_symbol')
        code = self.safe_currency_code(symbol)
        account = self.account()
        account['total'] = self.safe_string(balance, 'balance')
        account['used'] = self.safe_string(balance, 'locked_balance')
        result[code] = account
    return self.parse_balance(result, False)
def parse_order(self, order, market=None):
    """
    Convert a raw BigONE order payload into a unified order structure.
    """
    #
    #     {
    #         "id": 10,
    #         "asset_pair_name": "EOS-BTC",
    #         "price": "10.00",
    #         "amount": "10.00",
    #         "filled_amount": "9.0",
    #         "avg_deal_price": "12.0",
    #         "side": "ASK",
    #         "state": "FILLED",
    #         "created_at":"2019-01-29T06:05:56Z",
    #         "updated_at":"2019-01-29T06:05:56Z",
    #     }
    #
    id = self.safe_string(order, 'id')
    marketId = self.safe_string(order, 'asset_pair_name')
    symbol = self.safe_symbol(marketId, market, '-')
    timestamp = self.parse8601(self.safe_string(order, 'created_at'))
    price = self.safe_number(order, 'price')
    amount = self.safe_number(order, 'amount')
    filled = self.safe_number(order, 'filled_amount')
    status = self.parse_order_status(self.safe_string(order, 'state'))
    side = self.safe_string(order, 'side')
    # NOTE(review): anything other than 'BID' (including a missing side)
    # falls through to 'sell'
    if side == 'BID':
        side = 'buy'
    else:
        side = 'sell'
    lastTradeTimestamp = self.parse8601(self.safe_string(order, 'updated_at'))
    average = self.safe_number(order, 'avg_deal_price')
    return self.safe_order({
        'info': order,
        'id': id,
        'clientOrderId': None,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': lastTradeTimestamp,
        'symbol': symbol,
        'type': None,
        'timeInForce': None,
        'postOnly': None,
        'side': side,
        'price': price,
        'stopPrice': None,
        'amount': amount,
        'cost': None,
        'average': average,
        'filled': filled,
        'remaining': None,
        'status': status,
        'fee': None,
        'trades': None,
    })
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """
    Create an order via POST /orders.

    :param str type: 'limit', 'market', 'stop_limit' or 'stop_market'
        (case-insensitive); stop orders require a 'stop_price'/'stopPrice'
        parameter in params
    :param float price: required for limit and stop-limit orders
    :raises ArgumentsRequired: when a stop order has no stop price
    """
    self.load_markets()
    market = self.market(symbol)
    side = 'BID' if (side == 'buy') else 'ASK'
    uppercaseType = type.upper()
    request = {
        'asset_pair_name': market['id'],  # asset pair name BTC-USDT, required
        'side': side,  # order side one of "ASK"/"BID", required
        'amount': self.amount_to_precision(symbol, amount),  # order amount, string, required
        # 'price': self.price_to_precision(symbol, price),  # order price, string, required
        'type': uppercaseType,
        # 'operator': 'GTE',  # stop orders only, GTE greater than and equal, LTE less than and equal
        # 'immediate_or_cancel': False,  # limit orders only, must be False when post_only is True
        # 'post_only': False,  # limit orders only, must be False when immediate_or_cancel is True
    }
    if uppercaseType == 'LIMIT':
        request['price'] = self.price_to_precision(symbol, price)
    else:
        isStopLimit = (uppercaseType == 'STOP_LIMIT')
        isStopMarket = (uppercaseType == 'STOP_MARKET')
        if isStopLimit or isStopMarket:
            stopPrice = self.safe_number_2(params, 'stop_price', 'stopPrice')
            if stopPrice is None:
                raise ArgumentsRequired(self.id + ' createOrder() requires a stop_price parameter')
            request['stop_price'] = self.price_to_precision(symbol, stopPrice)
            params = self.omit(params, ['stop_price', 'stopPrice'])
        if isStopLimit:
            request['price'] = self.price_to_precision(symbol, price)
    response = self.privatePostOrders(self.extend(request, params))
    #
    #     {
    #         "id": 10,
    #         "asset_pair_name": "EOS-BTC",
    #         "price": "10.00",
    #         "amount": "10.00",
    #         "filled_amount": "9.0",
    #         "avg_deal_price": "12.0",
    #         "side": "ASK",
    #         "state": "FILLED",
    #         "created_at":"2019-01-29T06:05:56Z",
    #         "updated_at":"2019-01-29T06:05:56Z"
    #     }
    #
    order = self.safe_value(response, 'data')
    return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
    """
    Cancel a single order by exchange id via POST /orders/{id}/cancel.

    The response echoes the order payload, e.g.:

        {
            "id": 10,
            "asset_pair_name": "EOS-BTC",
            "price": "10.00",
            "amount": "10.00",
            "filled_amount": "9.0",
            "avg_deal_price": "12.0",
            "side": "ASK",
            "state": "CANCELLED",
            "created_at":"2019-01-29T06:05:56Z",
            "updated_at":"2019-01-29T06:05:56Z"
        }
    """
    self.load_markets()
    response = self.privatePostOrdersIdCancel(self.extend({'id': id}, params))
    return self.parse_order(self.safe_value(response, 'data'))
def cancel_all_orders(self, symbol=None, params={}):
    """
    Cancel all open orders via POST /orders/cancel.

    :param str symbol: optional unified symbol to restrict cancellation to a
        single asset pair
    :returns dict: raw exchange response

    BUGFIX: the previous code called self.market(symbol) unconditionally,
    which raised whenever the documented default symbol=None was used; the
    asset_pair_name filter is now only sent when a symbol is supplied.
    """
    self.load_markets()
    request = {}
    if symbol is not None:
        market = self.market(symbol)
        request['asset_pair_name'] = market['id']
    response = self.privatePostOrdersCancel(self.extend(request, params))
    #
    #     {
    #         "code":0,
    #         "data": {
    #             "cancelled":[
    #                 58272370,
    #                 58272377
    #             ],
    #             "failed": []
    #         }
    #     }
    #
    return response
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by exchange id via GET /orders/{id}."""
    self.load_markets()
    response = self.privateGetOrdersId(self.extend({'id': id}, params))
    data = self.safe_value(response, 'data', {})
    return self.parse_order(data)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch orders for a symbol via GET /orders.

    :param str symbol: required; the endpoint is per asset pair
    :raises ArgumentsRequired: when symbol is omitted
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {
        'asset_pair_name': market['id'],
        # 'page_token': 'dxzef',  # request page after self page token
        # 'side': 'ASK',  # 'ASK' or 'BID', optional
        # 'state': 'FILLED',  # 'CANCELLED', 'FILLED', 'PENDING'
        # 'limit' 20,  # default 20, max 200
    }
    if limit is not None:
        request['limit'] = limit  # default 20, max 200
    response = self.privateGetOrders(self.extend(request, params))
    #
    #    {
    #        "code":0,
    #        "data": [
    #             {
    #                 "id": 10,
    #                 "asset_pair_name": "ETH-BTC",
    #                 "price": "10.00",
    #                 "amount": "10.00",
    #                 "filled_amount": "9.0",
    #                 "avg_deal_price": "12.0",
    #                 "side": "ASK",
    #                 "state": "FILLED",
    #                 "created_at":"2019-01-29T06:05:56Z",
    #                 "updated_at":"2019-01-29T06:05:56Z",
    #             },
    #         ],
    #        "page_token":"dxzef",
    #    }
    #
    orders = self.safe_value(response, 'data', [])
    return self.parse_orders(orders, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """
    Fetch the account's own trades for a symbol via GET /trades.

    :param str symbol: required; the endpoint is per asset pair
    :raises ArgumentsRequired: when symbol is omitted
    """
    self.load_markets()
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
    market = self.market(symbol)
    request = {
        'asset_pair_name': market['id'],
        # 'page_token': 'dxzef',  # request page after self page token
    }
    if limit is not None:
        request['limit'] = limit  # default 20, max 200
    response = self.privateGetTrades(self.extend(request, params))
    #
    #     {
    #         "code": 0,
    #         "data": [
    #             {
    #                 "id": 10854280,
    #                 "asset_pair_name": "XIN-USDT",
    #                 "price": "70",
    #                 "amount": "1",
    #                 "taker_side": "ASK",
    #                 "maker_order_id": 58284908,
    #                 "taker_order_id": 58284909,
    #                 "maker_fee": "0.0008",
    #                 "taker_fee": "0.07",
    #                 "side": "SELF_TRADING",
    #                 "inserted_at": "2019-04-16T12:00:01Z"
    #             },
    #             {
    #                 "id": 10854263,
    #                 "asset_pair_name": "XIN-USDT",
    #                 "price": "75.7",
    #                 "amount": "12.743149",
    #                 "taker_side": "BID",
    #                 "maker_order_id": null,
    #                 "taker_order_id": 58284888,
    #                 "maker_fee": null,
    #                 "taker_fee": "0.0025486298",
    #                 "side": "BID",
    #                 "inserted_at": "2019-04-15T06:20:57Z"
    #             }
    #         ],
    #         "page_token":"dxfv"
    #     }
    #
    trades = self.safe_value(response, 'data', [])
    return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
    """Map an exchange order state to the unified status, or None if unknown."""
    return self.safe_string({
        'PENDING': 'open',
        'FILLED': 'closed',
        'CANCELLED': 'canceled',
    }, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch open orders: fetch_orders filtered server-side by state=PENDING."""
    query = self.extend({'state': 'PENDING'}, params)
    return self.fetch_orders(symbol, since, limit, query)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch closed orders: fetch_orders filtered server-side by state=FILLED."""
    query = self.extend({'state': 'FILLED'}, params)
    return self.fetch_orders(symbol, since, limit, query)
def nonce(self):
    # nanosecond-resolution nonce, as required by the JWT auth in sign()
    return self.microseconds() * 1000
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """
    Build the URL, headers and body for a request.

    Public requests only interpolate path params and urlencode the rest;
    private requests are authenticated with a JWT (HS256 over apiKey +
    nanosecond nonce) sent as a Bearer token.
    """
    query = self.omit(params, self.extract_params(path))
    baseUrl = self.implode_hostname(self.urls['api'][api])
    url = baseUrl + '/' + self.implode_params(path, params)
    if api == 'public':
        if query:
            url += '?' + self.urlencode(query)
    else:
        self.check_required_credentials()
        nonce = str(self.nonce())
        # JWT claims expected by the BigONE OpenAPI v2 auth scheme
        request = {
            'type': 'OpenAPIV2',
            'sub': self.apiKey,
            'nonce': nonce,
            # 'recv_window': '30',  # default 30
        }
        jwt = self.jwt(request, self.encode(self.secret))
        headers = {
            'Authorization': 'Bearer ' + jwt,
        }
        if method == 'GET':
            if query:
                url += '?' + self.urlencode(query)
        elif method == 'POST':
            headers['Content-Type'] = 'application/json'
            body = self.json(query)
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def fetch_deposit_address(self, code, params={}):
    """
    Fetch the deposit address for a currency via
    GET /assets/{asset_symbol}/address.

    :raises ExchangeError: when the exchange returns no address entries
    """
    self.load_markets()
    currency = self.currency(code)
    request = {
        'asset_symbol': currency['id'],
    }
    response = self.privateGetAssetsAssetSymbolAddress(self.extend(request, params))
    #
    # the actual response format is not the same as the documented one
    # the data key contains an array in the actual response
    #
    #     {
    #         "code":0,
    #         "message":"",
    #         "data":[
    #             {
    #                 "id":5521878,
    #                 "chain":"Bitcoin",
    #                 "value":"1GbmyKoikhpiQVZ1C9sbF17mTyvBjeobVe",
    #                 "memo":""
    #             }
    #         ]
    #     }
    #
    data = self.safe_value(response, 'data', [])
    dataLength = len(data)
    if dataLength < 1:
        # BUGFIX: added the missing space between the exchange id and the
        # method name in the error message
        raise ExchangeError(self.id + ' fetchDepositAddress() returned empty address response')
    firstElement = data[0]
    address = self.safe_string(firstElement, 'value')
    tag = self.safe_string(firstElement, 'memo')
    self.check_address(address)
    return {
        'currency': code,
        'address': address,
        'tag': tag,
        'info': response,
    }
def parse_transaction_status(self, status):
    """
    Map an exchange transaction state to the unified status, passing
    unknown states through unchanged.
    """
    mapping = {
        # what are other statuses here?
        'WITHHOLD': 'ok',  # deposits
        'UNCONFIRMED': 'pending',
        'CONFIRMED': 'ok',  # withdrawals
    }
    return self.safe_string(mapping, status, status)
def parse_transaction(self, transaction, currency=None):
    """
    Convert a raw deposit/withdrawal payload into a unified transaction.

    BUGFIX: the deposit/withdrawal detection was inverted. As the sample
    payloads below show, withdrawals carry a "customer_id" field and
    deposits do not, but the previous code labelled entries *with*
    "customer_id" as deposits.
    """
    #
    # fetchDeposits
    #
    #     {
    #         "amount": "25.0",
    #         "asset_symbol": "BTS"
    #         "confirms": 100,
    #         "id": 5,
    #         "inserted_at": "2018-02-16T11:39:58.000Z",
    #         "is_internal": False,
    #         "kind": "default",
    #         "memo": "",
    #         "state": "WITHHOLD",
    #         "txid": "72e03037d144dae3d32b68b5045462b1049a0755",
    #         "updated_at": "2018-11-09T10:20:09.000Z",
    #     }
    #
    # fetchWithdrawals
    #
    #     {
    #         "amount": "5",
    #         "asset_symbol": "ETH",
    #         "completed_at": "2018-03-15T16:13:45.610463Z",
    #         "customer_id": "10",
    #         "id": 10,
    #         "inserted_at": "2018-03-15T16:13:45.610463Z",
    #         "is_internal": True,
    #         "note": "2018-03-15T16:13:45.610463Z",
    #         "state": "CONFIRMED",
    #         "target_address": "0x4643bb6b393ac20a6175c713175734a72517c63d6f7"
    #         "txid": "0x4643bb6b393ac20a6175c713175734a72517c63d6f73a3ca90a15356f2e967da0",
    #     }
    #
    # withdraw
    #
    #     {
    #         "id":1077391,
    #         "customer_id":1082679,
    #         "amount":"21.9000000000000000",
    #         "txid":"",
    #         "is_internal":false,
    #         "kind":"on_chain",
    #         "state":"PENDING",
    #         "inserted_at":"2020-06-03T00:50:57+00:00",
    #         "updated_at":"2020-06-03T00:50:57+00:00",
    #         "memo":"",
    #         "target_address":"rDYtYT3dBeuw376rvHqoZBKW3UmvguoBAf",
    #         "fee":"0.1000000000000000",
    #         "asset_symbol":"XRP"
    #     }
    #
    currencyId = self.safe_string(transaction, 'asset_symbol')
    code = self.safe_currency_code(currencyId)
    id = self.safe_integer(transaction, 'id')
    amount = self.safe_number(transaction, 'amount')
    status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
    timestamp = self.parse8601(self.safe_string(transaction, 'inserted_at'))
    updated = self.parse8601(self.safe_string_2(transaction, 'updated_at', 'completed_at'))
    txid = self.safe_string(transaction, 'txid')
    address = self.safe_string(transaction, 'target_address')
    tag = self.safe_string(transaction, 'memo')
    # withdrawals carry a "customer_id" field, deposits do not
    type = 'withdrawal' if ('customer_id' in transaction) else 'deposit'
    return {
        'info': transaction,
        'id': id,
        'txid': txid,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'addressFrom': None,
        'address': None,
        'addressTo': address,
        'tagFrom': None,
        'tag': tag,
        'tagTo': None,
        'type': type,
        'amount': amount,
        'currency': code,
        'status': status,
        'updated': updated,
        'fee': None,
    }
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
    """
    Fetch the account's deposit history via GET /deposits, optionally
    filtered by currency code.
    """
    self.load_markets()
    request = {
        # 'page_token': 'dxzef',  # request page after self page token
        # 'limit': 50,  # optional, default 50
        # 'kind': 'string',  # optional - air_drop, big_holder_dividend, default, eosc_to_eos, internal, equally_airdrop, referral_mining, one_holder_dividend, single_customer, snapshotted_airdrop, trade_mining
        # 'asset_symbol': 'BTC',  # optional
    }
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['asset_symbol'] = currency['id']
    if limit is not None:
        request['limit'] = limit  # default 50
    response = self.privateGetDeposits(self.extend(request, params))
    #
    #     {
    #         "code": 0,
    #         "page_token": "NQ==",
    #         "data": [
    #             {
    #                 "id": 5,
    #                 "amount": "25.0",
    #                 "confirms": 100,
    #                 "txid": "72e03037d144dae3d32b68b5045462b1049a0755",
    #                 "is_internal": False,
    #                 "inserted_at": "2018-02-16T11:39:58.000Z",
    #                 "updated_at": "2018-11-09T10:20:09.000Z",
    #                 "kind": "default",
    #                 "memo": "",
    #                 "state": "WITHHOLD",
    #                 "asset_symbol": "BTS"
    #             }
    #         ]
    #     }
    #
    deposits = self.safe_value(response, 'data', [])
    return self.parse_transactions(deposits, code, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
    """
    Fetch the account's withdrawal history via GET /withdrawals, optionally
    filtered by currency code.
    """
    self.load_markets()
    request = {
        # 'page_token': 'dxzef',  # request page after self page token
        # 'limit': 50,  # optional, default 50
        # 'kind': 'string',  # optional - air_drop, big_holder_dividend, default, eosc_to_eos, internal, equally_airdrop, referral_mining, one_holder_dividend, single_customer, snapshotted_airdrop, trade_mining
        # 'asset_symbol': 'BTC',  # optional
    }
    currency = None
    if code is not None:
        currency = self.currency(code)
        request['asset_symbol'] = currency['id']
    if limit is not None:
        request['limit'] = limit  # default 50
    response = self.privateGetWithdrawals(self.extend(request, params))
    #
    #     {
    #         "code": 0,
    #         "data": [
    #             {
    #                 "id": 10,
    #                 "customer_id": "10",
    #                 "asset_symbol": "ETH",
    #                 "amount": "5",
    #                 "state": "CONFIRMED",
    #                 "note": "2018-03-15T16:13:45.610463Z",
    #                 "txid": "0x4643bb6b393ac20a6175c713175734a72517c63d6f73a3ca90a15356f2e967da0",
    #                 "completed_at": "2018-03-15T16:13:45.610463Z",
    #                 "inserted_at": "2018-03-15T16:13:45.610463Z",
    #                 "is_internal": True,
    #                 "target_address": "0x4643bb6b393ac20a6175c713175734a72517c63d6f7"
    #             }
    #         ],
    #         "page_token":"dxvf"
    #     }
    #
    withdrawals = self.safe_value(response, 'data', [])
    return self.parse_transactions(withdrawals, code, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
    """
    Request a withdrawal via POST /withdrawals.

    :param str tag: optional memo for currencies that need one
    :returns dict: unified transaction structure
    """
    self.load_markets()
    currency = self.currency(code)
    request = {
        'symbol': currency['id'],
        'target_address': address,
        'amount': self.currency_to_precision(code, amount),
    }
    if tag is not None:
        request['memo'] = tag
    # requires write permission on the wallet
    response = self.privatePostWithdrawals(self.extend(request, params))
    #
    #     {
    #         "code":0,
    #         "message":"",
    #         "data":{
    #             "id":1077391,
    #             "customer_id":1082679,
    #             "amount":"21.9000000000000000",
    #             "txid":"",
    #             "is_internal":false,
    #             "kind":"on_chain",
    #             "state":"PENDING",
    #             "inserted_at":"2020-06-03T00:50:57+00:00",
    #             "updated_at":"2020-06-03T00:50:57+00:00",
    #             "memo":"",
    #             "target_address":"rDYtYT3dBeuw376rvHqoZBKW3UmvguoBAf",
    #             "fee":"0.1000000000000000",
    #             "asset_symbol":"XRP"
    #         }
    #     }
    #
    data = self.safe_value(response, 'data', {})
    return self.parse_transaction(data, currency)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """
    Map API error responses to ccxt exceptions using the tables declared in
    describe()['exceptions']; a "code" of '0' means success.
    """
    if response is None:
        return  # fallback to default error handler
    #
    #     {"code":10013,"message":"Resource not found"}
    #     {"code":40004,"message":"invalid jwt"}
    #
    code = self.safe_string(response, 'code')
    message = self.safe_string(response, 'message')
    if code != '0':
        feedback = self.id + ' ' + body
        # try both the message text and the numeric code as exact keys
        self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
        self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
        self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
        raise ExchangeError(feedback)  # unknown error code / message
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from yepes.types import Undefined
class ModelMixin(object):
    """
    Adds a convenience method for easily obtain the model of the view object.
    """
    # Sentinel-initialised cache; resolved at most once per instance.
    _model = Undefined

    def get_model(self):
        """
        Returns the model of the view object.

        Resolution order: ``self.model``, then ``self.object.__class__``,
        then ``self.object_list.model``, and finally
        ``self.get_queryset().model``. The result is cached on ``_model``.
        """
        if self._model is not Undefined:
            return self._model
        try:
            # If model has been explicitly provided, use it.
            self._model = self.model
        except AttributeError:
            self._model = None
        if self._model is None:
            # Fall through the remaining sources in order; each lambda may
            # raise AttributeError, which moves us on to the next candidate.
            candidates = (
                lambda: self.object.__class__,        # single-object view
                lambda: self.object_list.model,       # list view
                lambda: self.get_queryset().model,    # queryset fallback
            )
            for resolve in candidates:
                try:
                    self._model = resolve()
                except AttributeError:
                    continue
                break
        return self._model
// RUN: %check_clang_tidy %s bugprone-sizeof-container %t -- -- -target x86_64-unknown-unknown

// Minimal stand-ins for the STL types so this lit test needs no real headers.
// The check should warn when sizeof() is applied to a container (a type with
// a public, const size() member) and stay silent for true arrays, bitset,
// std::array and non-container lookalikes.
namespace std {
typedef unsigned int size_t;

template <typename T>
struct basic_string {
  size_t size() const;
};
template <typename T>
basic_string<T> operator+(const basic_string<T> &, const T *);
typedef basic_string<char> string;

template <typename T>
struct vector {
  size_t size() const;
};

// std::bitset<> is not a container. sizeof() is reasonable for it.
template <size_t N>
struct bitset {
  size_t size() const;
};

// std::array<> is, well, an array. sizeof() is reasonable for it.
template <typename T, size_t N>
struct array {
  size_t size() const;
};

class fake_container1 {
  size_t size() const; // non-public
};
struct fake_container2 {
  size_t size(); // non-const
};
}

using std::size_t;

// Common array-element-count macros; their sizeof() uses must not warn.
#define ARRAYSIZE(a) \
  ((sizeof(a) / sizeof(*(a))) / static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
#define ARRAYSIZE2(a) \
  (((sizeof(a)) / (sizeof(*(a)))) / static_cast<size_t>(!((sizeof(a)) % (sizeof(*(a))))))

// A non-std container-like type; it still has a public const size(), so it
// is expected to be diagnosed when passed to sizeof() below.
struct string {
  std::size_t size() const;
};

// sizeof() of a dependent type is not diagnosed, even when instantiated
// with container types (see the g() calls in f()).
template<typename T>
void g(T t) {
  (void)sizeof(t);
}

void f() {
  string s1;
  std::string s2;
  std::vector<int> v;
  int a = 42 + sizeof(s1);
  // CHECK-MESSAGES: :[[@LINE-1]]:16: warning: sizeof() doesn't return the size of the container; did you mean .size()? [bugprone-sizeof-container]
  a = 123 * sizeof(s2);
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: sizeof() doesn't return the size
  a = 45 + sizeof(s2 + "asdf");
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: sizeof() doesn't return the size
  a = sizeof(v);
  // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: sizeof() doesn't return the size
  a = sizeof(std::vector<int>{});
  // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: sizeof() doesn't return the size
  a = sizeof(a);
  a = sizeof(int);
  a = sizeof(std::string);
  a = sizeof(std::vector<int>);
  g(s1);
  g(s2);
  g(v);
  std::fake_container1 fake1;
  std::fake_container2 fake2;
  std::bitset<7> std_bitset;
  std::array<int, 3> std_array;
  a = sizeof(fake1);
  a = sizeof(fake2);
  a = sizeof(std_bitset);
  a = sizeof(std_array);
  std::string arr[3];
  a = ARRAYSIZE(arr);
  a = ARRAYSIZE2(arr);
  a = sizeof(arr) / sizeof(arr[0]);
  (void)a;
}
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-04 16:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9). Only field *options* change here: the
    # help_text wording of the review-only proposal fields is tightened.
    # No schema change is performed by these AlterField operations.

    dependencies = [
        ('proposals', '0017_auto_20160104_0728'),
    ]

    operations = [
        migrations.AlterField(
            model_name='talkproposal',
            name='objective',
            field=models.TextField(help_text='Who is the intended audience for your talk? (Be specific, "Python users" is not a good answer). And what will the attendees get out of your talk? When they leave the room, what will they learn that they didn\'t know before? This is NOT made public and for REVIEW ONLY.', max_length=500, verbose_name='objective'),
        ),
        migrations.AlterField(
            model_name='talkproposal',
            name='outline',
            field=models.TextField(blank=True, help_text='How the talk will be arranged. It is highly recommended to attach the estimated time length for each sections in the talk. Talks in favor of 45min should have a fallback plan about how to shrink the content into a 25min one. This is NOT made public and for REVIEW ONLY.', verbose_name='outline'),
        ),
        migrations.AlterField(
            model_name='talkproposal',
            name='supplementary',
            field=models.TextField(blank=True, default='', help_text="Anything else you'd like the program committee to know when making their selection: your past speaking experience, community experience, etc. This is NOT made public and for REVIEW ONLY. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", verbose_name='supplementary'),
        ),
        migrations.AlterField(
            model_name='tutorialproposal',
            name='objective',
            field=models.TextField(help_text='Who is the intended audience for your talk? (Be specific, "Python users" is not a good answer). And what will the attendees get out of your talk? When they leave the room, what will they learn that they didn\'t know before? This is NOT made public and for REVIEW ONLY.', max_length=500, verbose_name='objective'),
        ),
        migrations.AlterField(
            model_name='tutorialproposal',
            name='outline',
            field=models.TextField(blank=True, help_text='How the tutorial will be arranged. You should enumerate over each section in your talk and attach each section with the estimated time length. This is NOT made public and for REVIEW ONLY.', verbose_name='outline'),
        ),
        migrations.AlterField(
            model_name='tutorialproposal',
            name='supplementary',
            field=models.TextField(blank=True, default='', help_text="Anything else you'd like the program committee to know when making their selection: your past speaking experience, community experience, etc. This is NOT made public and for REVIEW ONLY. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", verbose_name='supplementary'),
        ),
    ]
from django.apps import AppConfig
from django.core.signals import setting_changed
from django.db import connections
from django.db.backends.postgresql.psycopg_any import RANGE_TYPES
from django.db.backends.signals import connection_created
from django.db.migrations.writer import MigrationWriter
from django.db.models import CharField, OrderBy, TextField
from django.db.models.functions import Collate
from django.db.models.indexes import IndexExpression
from django.utils.translation import gettext_lazy as _
from .indexes import OpClass
from .lookups import (
SearchLookup,
TrigramSimilar,
TrigramStrictWordSimilar,
TrigramWordSimilar,
Unaccent,
)
from .serializers import RangeSerializer
from .signals import register_type_handlers
def uninstall_if_needed(setting, value, enter, **kwargs):
    """
    Undo the effects of PostgresConfig.ready() when django.contrib.postgres
    is "uninstalled" by override_settings().
    """
    being_uninstalled = (
        not enter
        and setting == "INSTALLED_APPS"
        and "django.contrib.postgres" not in set(value)
    )
    if not being_uninstalled:
        return
    connection_created.disconnect(register_type_handlers)
    # Unregister each custom lookup from both text-like field classes, in
    # the same order they were registered by ready().
    for lookup in (
        Unaccent,
        SearchLookup,
        TrigramSimilar,
        TrigramWordSimilar,
        TrigramStrictWordSimilar,
    ):
        CharField._unregister_lookup(lookup)
        TextField._unregister_lookup(lookup)
    # Disconnect this receiver until the next time this app is installed
    # and ready() connects it again to prevent unnecessary processing on
    # each setting change.
    setting_changed.disconnect(uninstall_if_needed)
    MigrationWriter.unregister_serializer(RANGE_TYPES)
class PostgresConfig(AppConfig):
    """App config that wires up PostgreSQL-specific lookups, serializers,
    introspection data and type handlers when the app loads."""

    name = "django.contrib.postgres"
    verbose_name = _("PostgreSQL extensions")

    def ready(self):
        setting_changed.connect(uninstall_if_needed)
        # PostgreSQL OIDs may vary depending on the installation, especially
        # for datatypes from extensions, e.g. "hstore". In such cases, the
        # type_display attribute (psycopg 3.2+) should be used.
        range_field_types = {
            3904: "django.contrib.postgres.fields.IntegerRangeField",
            3906: "django.contrib.postgres.fields.DecimalRangeField",
            3910: "django.contrib.postgres.fields.DateTimeRangeField",
            3912: "django.contrib.postgres.fields.DateRangeField",
            3926: "django.contrib.postgres.fields.BigIntegerRangeField",
            "hstore": "django.contrib.postgres.fields.HStoreField",
        }
        # Connections may already exist before we are called.
        for conn in connections.all(initialized_only=True):
            if conn.vendor != "postgresql":
                continue
            conn.introspection.data_types_reverse.update(range_field_types)
            if conn.connection is not None:
                register_type_handlers(conn)
        connection_created.connect(register_type_handlers)
        # Register every custom lookup on both text-like field classes.
        for lookup in (
            Unaccent,
            SearchLookup,
            TrigramSimilar,
            TrigramWordSimilar,
            TrigramStrictWordSimilar,
        ):
            CharField.register_lookup(lookup)
            TextField.register_lookup(lookup)
        MigrationWriter.register_serializer(RANGE_TYPES, RangeSerializer)
        IndexExpression.register_wrappers(OrderBy, OpClass, Collate)
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/memory-controllers/canaan,k210-sram.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Canaan K210 SRAM memory controller
description:
The Canaan K210 SRAM memory controller is responsible for the system's 8 MiB
of SRAM. The controller is initialised by the bootloader, which configures
its clocks, before OS bringup.
maintainers:
- Conor Dooley <conor@kernel.org>
properties:
compatible:
enum:
- canaan,k210-sram
clocks:
minItems: 1
items:
- description: sram0 clock
- description: sram1 clock
- description: aisram clock
clock-names:
minItems: 1
items:
- const: sram0
- const: sram1
- const: aisram
required:
- compatible
- clocks
- clock-names
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/k210-clk.h>
memory-controller {
compatible = "canaan,k210-sram";
clocks = <&sysclk K210_CLK_SRAM0>,
<&sysclk K210_CLK_SRAM1>,
<&sysclk K210_CLK_AI>;
clock-names = "sram0", "sram1", "aisram";
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/memory-controllers/canaan,k210-sram.yaml |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
from ansible.module_utils.basic import AnsibleModule, is_executable
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: supervisorctl
short_description: Manage the state of a program or group of programs running via supervisord
description:
- Manage the state of a program or group of programs running via supervisord
version_added: "0.7"
options:
name:
description:
- The name of the supervisord program or group to manage.
- The name will be taken as group name when it ends with a colon I(:)
- Group support is only available in Ansible version 1.6 or later.
required: true
default: null
config:
description:
- The supervisor configuration file path
required: false
default: null
version_added: "1.3"
server_url:
description:
- URL on which supervisord server is listening
required: false
default: null
version_added: "1.3"
username:
description:
- username to use for authentication
required: false
default: null
version_added: "1.3"
password:
description:
- password to use for authentication
required: false
default: null
version_added: "1.3"
state:
description:
- The desired state of program/group.
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "absent" ]
supervisorctl_path:
description:
- path to supervisorctl executable
required: false
default: null
version_added: "1.4"
notes:
- When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
- When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
requirements: [ "supervisorctl" ]
author:
- "Matt Wright (@mattupstate)"
- "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
'''
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl:
name: my_app
state: started
# Manage the state of program group to be in 'started' state.
- supervisorctl:
name: 'my_apps:'
state: started
# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl:
name: my_app
state: restarted
config: /var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl:
name: my_app
state: restarted
username: test
password: testpass
server_url: http://localhost:9001
'''
def main():
    """Module entry point: parse parameters, locate supervisorctl and drive
    the requested program/group into the desired state.

    Every code path terminates through module.exit_json()/fail_json(), which
    raise SystemExit, so nothing after a matched state branch runs.
    """
    arg_spec = dict(
        name=dict(required=True),
        config=dict(required=False, type='path'),
        server_url=dict(required=False),
        username=dict(required=False),
        password=dict(required=False, no_log=True),
        supervisorctl_path=dict(required=False, type='path'),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    # A trailing colon means the target is a supervisord *group*.
    is_group = False
    if name.endswith(':'):
        is_group = True
        name = name.rstrip(':')
    state = module.params['state']
    config = module.params.get('config')
    server_url = module.params.get('server_url')
    username = module.params.get('username')
    password = module.params.get('password')
    supervisorctl_path = module.params.get('supervisorctl_path')

    # we check error message for a pattern, so we need to make sure that's in C locale
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    # Resolve the supervisorctl binary: explicit path wins, otherwise PATH.
    if supervisorctl_path:
        if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
            supervisorctl_args = [supervisorctl_path]
        else:
            module.fail_json(
                msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
    else:
        supervisorctl_args = [module.get_bin_path('supervisorctl', True)]

    # Optional connection flags appended to every supervisorctl invocation.
    if config:
        supervisorctl_args.extend(['-c', config])
    if server_url:
        supervisorctl_args.extend(['-s', server_url])
    if username:
        supervisorctl_args.extend(['-u', username])
    if password:
        supervisorctl_args.extend(['-p', password])

    def run_supervisorctl(cmd, name=None, **kwargs):
        # Run one supervisorctl subcommand, optionally targeted at a process.
        args = list(supervisorctl_args)  # copy the master args
        args.append(cmd)
        if name:
            args.append(name)
        return module.run_command(args, **kwargs)

    def get_matched_processes():
        # Parse `supervisorctl status` output into (process_name, status)
        # pairs matching the requested program or group name.
        matched = []
        rc, out, err = run_supervisorctl('status')
        for line in out.splitlines():
            # One status line may look like one of these two:
            # process not in group:
            # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
            # process in group:
            # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
            fields = [field for field in line.split(' ') if field != '']
            process_name = fields[0]
            status = fields[1]

            if is_group:
                # If there is ':', this process must be in a group.
                if ':' in process_name:
                    group = process_name.split(':')[0]
                    if group != name:
                        continue
                else:
                    continue
            else:
                if process_name != name:
                    continue

            matched.append((process_name, status))
        return matched

    def take_action_on_processes(processes, status_filter, action, expected_result):
        # Apply `action` to every matched process whose status passes the
        # filter; succeed only if supervisorctl reports `expected_result`.
        to_take_action_on = []
        for process_name, status in processes:
            if status_filter(status):
                to_take_action_on.append(process_name)

        if len(to_take_action_on) == 0:
            # Nothing to do: already in the desired state.
            module.exit_json(changed=False, name=name, state=state)
        if module.check_mode:
            module.exit_json(changed=True)
        for process_name in to_take_action_on:
            rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
            if '%s: %s' % (process_name, expected_result) not in out:
                module.fail_json(msg=out)

        module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)

    if state == 'restarted':
        # Refresh configuration first so a restart picks up config changes.
        rc, out, err = run_supervisorctl('update', check_rc=True)
        processes = get_matched_processes()
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")

        take_action_on_processes(processes, lambda s: True, 'restart', 'started')

    processes = get_matched_processes()

    if state == 'absent':
        if len(processes) == 0:
            module.exit_json(changed=False, name=name, state=state)

        if module.check_mode:
            module.exit_json(changed=True)
        run_supervisorctl('reread', check_rc=True)
        rc, out, err = run_supervisorctl('remove', name)
        if '%s: removed process group' % name in out:
            module.exit_json(changed=True, name=name, state=state)
        else:
            module.fail_json(msg=out, name=name, state=state)

    if state == 'present':
        if len(processes) > 0:
            module.exit_json(changed=False, name=name, state=state)

        if module.check_mode:
            module.exit_json(changed=True)
        run_supervisorctl('reread', check_rc=True)
        rc, out, err = run_supervisorctl('add', name)
        if '%s: added process group' % name in out:
            module.exit_json(changed=True, name=name, state=state)
        else:
            module.fail_json(msg=out, name=name, state=state)

    if state == 'started':
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")
        take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')

    if state == 'stopped':
        if len(processes) == 0:
            module.fail_json(name=name, msg="ERROR (no such process)")
        take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
# encoding: UTF-8
__author__ = 'CHENXY'
# Mapping from C++ primitive type names to the Python type-name strings
# written into the generated ksgold_data_type.py module.
type_dict = {
    'int': 'int',
    'char': 'string',
    'double': 'float',
    'short': 'int',
    'unsigned': 'string'
}
def process_line(line):
    """Translate one line of the C++ header into generated Python source.

    Returns an empty string for lines that produce no output.
    """
    if '///' in line:
        # Doxygen-style comment.
        return process_comment(line)
    if 'typedef' in line:
        # Type alias declaration.
        return process_typedef(line)
    if '#define' in line:
        # Constant definition.
        return process_define(line)
    if line == '\n':
        # Preserve blank lines as-is.
        return line
    return ''
def process_comment(line):
    """Convert a C++ '///' comment line into a Python '#' comment.

    The three leading slashes are dropped and replaced with a single '#';
    the rest of the line (including its newline) is kept verbatim.
    """
    return '#' + line[3:]
def process_typedef(line):
    """Convert a C++ typedef line into a typedefDict assignment string.

    E.g. ``typedef char SomeType[9];`` becomes
    ``typedefDict["SomeType"] = "string"\\n``.

    Fixes over the original: the ``if content[1] != 'unsigned'`` branch was
    dead code (both branches assigned the same value), and a stray debug
    ``print`` leaked parser state to stdout; both are removed. Output is
    unchanged.
    """
    content = line.split(' ')
    # Map the C++ base type (second token) to its Python type name.
    type_ = type_dict[content[1]]
    # The alias name is always the last token, minus the trailing ';\n'.
    keyword = content[-1].replace(';\n', '')
    if '[' in keyword:
        # Strip an array-size suffix, e.g. 'KeyName[9]' -> 'KeyName'.
        keyword = keyword[:keyword.index('[')]
    py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
    return py_line
def process_define(line):
    """Convert a C++ '#define NAME VALUE' line into a defineDict assignment.

    A bare '#define NAME' with no value yields an empty string.
    """
    tokens = line.split(' ')
    if len(tokens) <= 2:
        # No value present: nothing to emit.
        return ''
    # tokens[-1] still carries the trailing newline, which conveniently
    # terminates the generated Python line.
    return 'defineDict["%s"] = %s' % (tokens[1], tokens[-1])
def main():
    """Entry point: translate KSUserApiDataTypeEx.h into ksgold_data_type.py.

    NOTE: this is Python 2 code (print statements, 'except Exception, e',
    str.decode on byte strings); keep it on a Python 2 interpreter.
    """
    try:
        fcpp = open('KSUserApiDataTypeEx.h','r')
        fpy = open('ksgold_data_type.py', 'w')
        # Emit the generated module's header and the two target dicts.
        fpy.write('# encoding: UTF-8\n')
        fpy.write('\n')
        fpy.write('defineDict = {}\n')
        fpy.write('typedefDict = {}\n')
        fpy.write('\n')
        for n, line in enumerate(fcpp):
            py_line = process_line(line)
            if py_line:
                # The header is GBK-encoded; re-encode emitted text as UTF-8.
                fpy.write(py_line.decode('gbk').encode('utf-8'))
            # Progress indicator: current line number.
            print n
        fcpp.close()
        fpy.close()
        print u'data_type.py生成过程完成'
    except Exception, e:
        print u'data_type.py生成过程出错'
        print e
# Run the generator when executed as a script.
if __name__ == '__main__':
    main()
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/devicetainteviction/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with DeviceTaintEvictionControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_DeviceTaintEvictionControllerConfiguration_To_config_DeviceTaintEvictionControllerConfiguration is an autogenerated conversion function.
// It publicly re-exports the package-private autoConvert helper emitted by
// conversion-gen so other packages can perform this conversion.
func Convert_v1alpha1_DeviceTaintEvictionControllerConfiguration_To_config_DeviceTaintEvictionControllerConfiguration(in *v1alpha1.DeviceTaintEvictionControllerConfiguration, out *config.DeviceTaintEvictionControllerConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_DeviceTaintEvictionControllerConfiguration_To_config_DeviceTaintEvictionControllerConfiguration(in, out, s)
}
// Convert_config_DeviceTaintEvictionControllerConfiguration_To_v1alpha1_DeviceTaintEvictionControllerConfiguration is an autogenerated conversion function.
// It publicly re-exports the package-private autoConvert helper emitted by
// conversion-gen so other packages can perform this conversion.
func Convert_config_DeviceTaintEvictionControllerConfiguration_To_v1alpha1_DeviceTaintEvictionControllerConfiguration(in *config.DeviceTaintEvictionControllerConfiguration, out *v1alpha1.DeviceTaintEvictionControllerConfiguration, s conversion.Scope) error {
	return autoConvert_config_DeviceTaintEvictionControllerConfiguration_To_v1alpha1_DeviceTaintEvictionControllerConfiguration(in, out, s)
}
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.data.generators.pattern_machine import ConsecutivePatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.research.temporal_memory import TemporalMemory
from nupic.research.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, TemporalMemory): pass
class TemporalMemoryMonitorMixinTest(unittest.TestCase):
  """Tests for the traces and metrics recorded by TemporalMemoryMonitorMixin."""

  def setUp(self):
    # 100 columns, consecutive patterns of 5 on-bits each.
    self.patternMachine = ConsecutivePatternMachine(100, 5)
    self.sequenceMachine = SequenceMachine(self.patternMachine)

    self.tm = MonitoredTemporalMemory(columnDimensions=[100],
                                      cellsPerColumn=4,
                                      initialPermanence=0.6,
                                      connectedPermanence=0.5,
                                      minThreshold=1,
                                      maxNewSynapseCount=6,
                                      permanenceIncrement=0.1,
                                      permanenceDecrement=0.05,
                                      activationThreshold=1)

  def testFeedSequence(self):
    # Feeding a sequence should record one trace entry per pattern
    # (resets excluded); the final, deliberately unpredicted pattern must
    # appear in the predicted-inactive / unpredicted-active traces.
    sequence = self._generateSequence()
    sequenceLength = len(sequence) - 3  # without resets

    # Replace last pattern (before the None) with an unpredicted one
    sequence[-2] = self.patternMachine.get(4)

    self._feedSequence(sequence, sequenceLabel="Test")

    activeColumnsTrace = self.tm.mmGetTraceActiveColumns()
    predictiveCellsTrace = self.tm.mmGetTracePredictiveCells()
    sequenceLabelsTrace = self.tm.mmGetTraceSequenceLabels()
    resetsTrace = self.tm.mmGetTraceResets()
    predictedActiveCellsTrace = self.tm.mmGetTracePredictedActiveCells()
    predictedInactiveCellsTrace = self.tm.mmGetTracePredictedInactiveCells()
    predictedActiveColumnsTrace = self.tm.mmGetTracePredictedActiveColumns()
    predictedInactiveColumnsTrace = self.tm.mmGetTracePredictedInactiveColumns()
    unpredictedActiveColumnsTrace = self.tm.mmGetTraceUnpredictedActiveColumns()

    # Every trace holds exactly one entry per (non-reset) pattern fed.
    self.assertEqual(len(activeColumnsTrace.data), sequenceLength)
    self.assertEqual(len(predictiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(sequenceLabelsTrace.data), sequenceLength)
    self.assertEqual(len(resetsTrace.data), sequenceLength)
    self.assertEqual(len(predictedActiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(predictedInactiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(predictedActiveColumnsTrace.data), sequenceLength)
    self.assertEqual(len(predictedInactiveColumnsTrace.data), sequenceLength)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data), sequenceLength)

    self.assertEqual(activeColumnsTrace.data[-1], self.patternMachine.get(4))
    self.assertEqual(sequenceLabelsTrace.data[-1], "Test")

    # Resets are flagged at each sequence start only.
    self.assertEqual(resetsTrace.data[0], True)
    self.assertEqual(resetsTrace.data[1], False)
    self.assertEqual(resetsTrace.data[10], True)
    self.assertEqual(resetsTrace.data[-1], False)

    # The swapped-in final pattern is unpredicted: its 5 columns show up as
    # predicted-inactive / unpredicted-active rather than predicted-active.
    self.assertEqual(len(predictedActiveCellsTrace.data[-2]), 5)
    self.assertEqual(len(predictedActiveCellsTrace.data[-1]), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data[-2]), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data[-1]), 5)

    self.assertEqual(len(predictedActiveColumnsTrace.data[-2]), 5)
    self.assertEqual(len(predictedActiveColumnsTrace.data[-1]), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data[-2]), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data[-1]), 5)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data[-2]), 0)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data[-1]), 5)

  def testClearHistory(self):
    # mmClearHistory must empty every trace.
    sequence = self._generateSequence()
    self._feedSequence(sequence, sequenceLabel="Test")
    self.tm.mmClearHistory()

    activeColumnsTrace = self.tm.mmGetTraceActiveColumns()
    predictiveCellsTrace = self.tm.mmGetTracePredictiveCells()
    sequenceLabelsTrace = self.tm.mmGetTraceSequenceLabels()
    resetsTrace = self.tm.mmGetTraceResets()
    predictedActiveCellsTrace = self.tm.mmGetTracePredictedActiveCells()
    predictedInactiveCellsTrace = self.tm.mmGetTracePredictedInactiveCells()
    predictedActiveColumnsTrace = self.tm.mmGetTracePredictedActiveColumns()
    predictedInactiveColumnsTrace = self.tm.mmGetTracePredictedInactiveColumns()
    unpredictedActiveColumnsTrace = self.tm.mmGetTraceUnpredictedActiveColumns()

    self.assertEqual(len(activeColumnsTrace.data), 0)
    self.assertEqual(len(predictiveCellsTrace.data), 0)
    self.assertEqual(len(sequenceLabelsTrace.data), 0)
    self.assertEqual(len(resetsTrace.data), 0)
    self.assertEqual(len(predictedActiveCellsTrace.data), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data), 0)
    self.assertEqual(len(predictedActiveColumnsTrace.data), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data), 0)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data), 0)

  def testSequencesMetrics(self):
    # Two disjoint sequences share no predicted-active cells; re-feeding one
    # of them raises the shared-cells metric above 1.
    sequence = self._generateSequence()
    self._feedSequence(sequence, "Test1")
    sequence.reverse()
    sequence.append(sequence.pop(0))  # Move None (reset) to the end
    self._feedSequence(sequence, "Test2")

    sequencesPredictedActiveCellsPerColumnMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsPerColumn()
    sequencesPredictedActiveCellsSharedMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsShared()

    self.assertEqual(sequencesPredictedActiveCellsPerColumnMetric.mean, 1)
    self.assertEqual(sequencesPredictedActiveCellsSharedMetric.mean, 1)

    self._feedSequence(sequence, "Test3")

    sequencesPredictedActiveCellsPerColumnMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsPerColumn()
    sequencesPredictedActiveCellsSharedMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsShared()

    self.assertEqual(sequencesPredictedActiveCellsPerColumnMetric.mean, 1)
    self.assertTrue(sequencesPredictedActiveCellsSharedMetric.mean > 1)

  # ==============================
  # Helper functions
  # ==============================

  def _generateSequence(self):
    # Ten consecutive patterns followed by a reset (None), repeated 3 times.
    numbers = range(0, 10)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)
    sequence.append(None)
    sequence *= 3

    return sequence

  def _feedSequence(self, sequence, sequenceLabel=None):
    # Feed each pattern to the TM; None triggers a reset.
    for pattern in sequence:
      if pattern is None:
        self.tm.reset()
      else:
        self.tm.compute(pattern, sequenceLabel=sequenceLabel)
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
from __future__ import print_function
import sys, requests, random, optparse, time, json
from itertools import islice
from threading import Thread
from collections import Counter
import requests
import six
from six.moves.queue import Queue
from .utils import SplashServer, MockServer
class StressTest():
    """Fires a batch of render requests at a Splash instance from a pool of
    daemon worker threads and prints per-status-code counts and timing."""

    def __init__(self, reqs, host="localhost:8050", requests=1000, concurrency=50, shuffle=False, verbose=False):
        # NOTE(review): the `requests` parameter shadows the `requests` HTTP
        # library inside this method's scope; here it is the request *count*.
        self.reqs = reqs                # iterable of request-argument dicts
        self.host = host                # Splash host:port
        self.requests = requests        # total number of requests to issue
        self.concurrency = concurrency  # number of worker threads
        self.shuffle = shuffle          # randomize request order if True
        self.verbose = verbose          # one log line per request if True

    def run(self):
        """Run the stress test and print a summary to stdout."""
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency : %d" % self.concurrency)
        starttime = time.time()
        q, p = Queue(), Queue()
        # Daemon workers: they die with the main thread after q.join() returns.
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()
        outputs = []
        # NOTE(review): this reads self.requests results, but only len(args)
        # were enqueued — if self.reqs yields fewer items than self.requests,
        # p.get() blocks forever. Confirm callers always supply enough.
        for _ in six.moves.range(self.requests):
            outputs.append(p.get())
        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency : %d" % self.concurrency)
        print("Elapsed time : %.3fs" % elapsed)
        print("Avg time p/req: %.3fs" % (elapsed/len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print(" %s: %d" % (c, n))
def worker(host, q, p, verbose=False):
    """
    Queue consumer that renders URLs through a Splash instance, forever.

    Pulls request-argument dicts from *q*, issues GET requests against
    http://<host>/render.html, and pushes either the HTTP status code or the
    exception type onto *p*. Progress goes to stdout: '.' per success, 'E'
    per error (one detailed line per request when *verbose*).

    :param host: Splash host:port, e.g. "localhost:8050"
    :param q: input queue of request-parameter dicts
    :param p: output queue receiving status codes / exception types
    :param verbose: print one detailed line per request instead of dots

    Fixes over the original: the verbose error path printed the *absolute
    start timestamp* instead of the elapsed time (t was never converted on
    the exception path), and referenced `t`/`args` that were unbound when
    q.get() itself raised before the first request completed.
    """
    url = "http://%s/render.html" % host
    while True:
        args = q.get()
        started = time.time()
        try:
            r = requests.get(url, params=args)
            elapsed = time.time() - started
            p.put(r.status_code)
            if verbose:
                print(". %d %.3fs %s" % (r.status_code, elapsed, args))
            else:
                sys.stdout.write(".")
                sys.stdout.flush()
        except Exception as e:
            elapsed = time.time() - started
            p.put(type(e))
            if verbose:
                print("E %.3fs %s" % (elapsed, args))
            else:
                sys.stdout.write("E")
                sys.stdout.flush()
        finally:
            q.task_done()
class MockArgs(object):
    """
    Generates render-request arguments against the mock server with a fixed
    outcome mix: 50% OK, 30% connection errors, 20% timeouts.
    """
    ok_urls = 0.5
    error_urls = 0.3
    timeout_urls = 0.2

    def __init__(self, requests=1000):
        self.requests = requests

    def _ok_urls(self):
        # Requests expected to succeed with HTTP 200.
        count = int(self.requests * self.ok_urls)
        return ["http://localhost:8998/jsrender"] * count

    def _error_urls(self):
        # Requests expected to fail (unresolvable host -> HTTP 502).
        count = int(self.requests * self.error_urls)
        return ["http://non-existent-host/"] * count

    def _timeout_urls(self):
        # Requests expected to exceed the render timeout (HTTP 504).
        count = int(self.requests * self.timeout_urls)
        return ["http://localhost:8998/delay?n=10&timeout=0.5"] * count

    def __iter__(self):
        ok, bad, slow = self._ok_urls(), self._error_urls(), self._timeout_urls()
        print("Expected codes: HTTP200x%d, HTTP502x%d, HTTP504x%d" % (
            len(ok), len(bad), len(slow)))
        return ({"url": u} for u in ok + bad + slow)
class ArgsFromUrlFile(object):
    """Yield request args for every URL listed (one per line) in a text file.

    URLs missing a scheme are prefixed with ``http://``.
    """
    def __init__(self, urlfile):
        self.urlfile = urlfile

    def __iter__(self):
        # Use a context manager so the file handle is closed even when
        # iteration stops early (the original leaked the handle).
        with open(self.urlfile) as fp:
            for line in fp:
                url = line.rstrip()
                if '://' not in url:
                    url = 'http://' + url
                yield {"url": url, "timeout": 60}
class ArgsFromLogfile(object):
    """Replay request args recorded in a splash log file.

    Only lines containing ``[stats]`` are considered; each carries a
    fixed-width 33-character prefix followed by a JSON payload whose
    ``args`` entry is yielded.
    """
    def __init__(self, logfile):
        self.logfile = logfile

    def __iter__(self):
        # Context manager closes the log file even on early termination
        # (the original leaked the handle).
        with open(self.logfile) as fp:
            for line in fp:
                if "[stats]" in line:
                    entry = json.loads(line[33:].rstrip())
                    yield entry['args']
def lua_runonce(script, timeout=60., splash_args=None, **kwargs):
    """Start a one-off splash server, execute *script* in it and return the
    raw response body.

    :type script: str
    :param script: Script to be executed.
    :type timeout: float
    :param timeout: Timeout value for the execution request.
    :param splash_args: Extra parameters for splash server invocation.
    :type kwargs: dict
    :param kwargs: Any other parameters are passed as arguments to the request
        and will be available via ``splash.args``.

    A `MockServer` is started as well; if the ``url`` kwarg uses the mock
    scheme (e.g. ``"mock://jsrender"``) it is rewritten to point at the
    corresponding mock server resource.
    """
    if splash_args is None:
        splash_args = ['--disable-lua-sandbox',
                       '--allowed-schemes=file,http,https', ]
    with SplashServer(extra_args=splash_args) as server, \
            MockServer() as mock:
        url = kwargs.get('url', '')
        if url.startswith('mock://'):
            kwargs['url'] = mock.url(url[len('mock://'):])
        payload = {'lua_source': script}
        payload.update(kwargs)
        resp = requests.get(server.url('execute'), params=payload,
                            timeout=timeout)
        if not resp.ok:
            raise RuntimeError(resp.text)
        return resp.content
def benchmark_png(url, viewport=None, wait=0.5, render_all=1,
                  width=None, height=None, nrepeats=3, timeout=60.):
    """Render *url* to PNG *nrepeats* times inside splash and return the
    decoded timing/memory stats dict produced by the Lua script below."""
    script = """
function main(splash)
local resp, err = splash:go(splash.args.url)
assert(resp, err)
assert(splash:wait(tonumber(splash.args.wait)))
-- if viewport is 'full' it should be set only after waiting
if splash.args.viewport ~= nil and splash.args.viewport ~= "full" then
local w, h = string.match(splash.args.viewport, '^(%d+)x(%d+)')
if w == nil or h == nil then
error('Invalid viewport size format: ' .. splash.args.viewport)
end
splash:set_viewport_size(tonumber(w), tonumber(h))
end
local susage = splash:get_perf_stats()
local nrepeats = tonumber(splash.args.nrepeats)
local render_all = splash.args.render_all or splash.args.viewport == 'full'
local png, err
for i = 1, nrepeats do
png, err = splash:png{width=splash.args.width,
height=splash.args.height,
render_all=render_all}
assert(png, err)
end
local eusage = splash:get_perf_stats()
return {
wallclock_secs=(eusage.walltime - susage.walltime) / nrepeats,
maxrss=eusage.maxrss,
cpu_secs=(eusage.cputime - susage.cputime) / nrepeats,
png=png,
}
end
"""
    raw = lua_runonce(
        script, url=url, width=width, height=height, render_all=render_all,
        nrepeats=nrepeats, wait=wait, viewport=viewport, timeout=timeout)
    return json.loads(raw)
def parse_opts():
    """Declare the command-line interface and parse ``sys.argv``.

    Returns the ``(options, args)`` pair produced by optparse.
    """
    option_table = [
        (("-H",), dict(dest="host", default="localhost:8050",
                       help="splash hostname & port (default: %default)")),
        (("-u",), dict(dest="urlfile", metavar="FILE",
                       help="read urls from FILE instead of using mock server ones")),
        (("-l",), dict(dest="logfile", metavar="FILE",
                       help="read urls from splash log file (useful for replaying)")),
        (("-s",), dict(dest="shuffle", action="store_true", default=False,
                       help="shuffle (randomize) requests (default: %default)")),
        (("-v",), dict(dest="verbose", action="store_true", default=False,
                       help="verbose mode (default: %default)")),
        (("-c",), dict(dest="concurrency", type="int", default=50,
                       help="concurrency (default: %default)")),
        (("-n",), dict(dest="requests", type="int", default=1000,
                       help="number of requests (default: %default)")),
    ]
    op = optparse.OptionParser()
    for flags, params in option_table:
        op.add_option(*flags, **params)
    return op.parse_args()
def main():
    """Entry point: pick the request source from CLI options, then run."""
    opts, _ = parse_opts()
    if opts.urlfile:
        source = ArgsFromUrlFile(opts.urlfile)
    elif opts.logfile:
        source = ArgsFromLogfile(opts.logfile)
    else:
        source = MockArgs(opts.requests)
    test = StressTest(source, opts.host, opts.requests,
                      opts.concurrency, opts.shuffle, opts.verbose)
    test.run()


if __name__ == "__main__":
    main()
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from werkzeug.exceptions import NotFound
from MaKaC.review import Abstract
from MaKaC.webinterface.rh.conferenceBase import RHFileBase
from MaKaC.webinterface.rh.base import RHDisplayBaseProtected
from MaKaC.errors import NotFoundError
from MaKaC.conference import LocalFile
from indico.web.flask.util import send_file
class RHFileAccess(RHFileBase, RHDisplayBaseProtected):
    """Serve a legacy conference file, restricted to abstract attachments."""

    def _checkParams(self, params):
        try:
            RHFileBase._checkParams(self, params)
        except Exception:
            # Narrowed from a bare ``except:`` so that system-exiting
            # exceptions (KeyboardInterrupt/SystemExit) still propagate
            # instead of being reported as a missing file.
            raise NotFoundError("The file you tried to access does not exist.")

    def _checkProtection(self):
        if isinstance(self._file.getOwner(), Abstract):
            RHDisplayBaseProtected._checkProtection(self)
        else:
            # Non-abstract materials are superseded by the attachments
            # module; refuse to serve them through this handler.
            raise NotFound

    def _process(self):
        assert isinstance(self._file, LocalFile)
        return send_file(self._file.getFileName(), self._file.getFilePath(),
                         self._file.getFileType(), self._file.getCreationDate())
import {
CancellationToken,
codefix,
Debug,
fileShouldUseJavaScriptRequire,
findAncestor,
findIndex,
findTokenOnLeftOfPosition,
forEachChild,
formatting,
getNewLineOrDefaultFromHost,
getQuotePreference,
getTokenAtPosition,
isIdentifier,
Program,
rangeContainsPosition,
rangeContainsRange,
SourceFile,
Statement,
SymbolFlags,
textChanges,
TextRange,
UserPreferences,
} from "./_namespaces/ts.js";
import { addTargetFileImports } from "./refactors/helpers.js";
import {
addExportsInOldFile,
getExistingLocals,
getUsageInfo,
} from "./refactors/moveToFile.js";
import {
CodeFixContextBase,
FileTextChanges,
LanguageServiceHost,
PasteEdits,
} from "./types.js";
const fixId = "providePostPasteEdits";

/** @internal */
export function pasteEditsProvider(
    targetFile: SourceFile,
    pastedText: string[],
    pasteLocations: TextRange[],
    copiedFrom: { file: SourceFile; range: TextRange[]; } | undefined,
    host: LanguageServiceHost,
    preferences: UserPreferences,
    formatContext: formatting.FormatContext,
    cancellationToken: CancellationToken,
): PasteEdits {
    // Run the paste-edit computation under a change tracker and surface the
    // accumulated file edits together with the fix id.
    const edits: FileTextChanges[] = textChanges.ChangeTracker.with(
        { host, formatContext, preferences },
        tracker => pasteEdits(targetFile, pastedText, pasteLocations, copiedFrom, host, preferences, formatContext, cancellationToken, tracker),
    );
    return { edits, fixId };
}
/** Source file and the text ranges within it that the pasted text was copied from. */
interface CopiedFromInfo {
    file: SourceFile;
    range: TextRange[];
}
/**
 * Computes the edits for a paste: splices the pasted text into each paste
 * location and queues any imports the pasted code needs — using the copied-from
 * file's exports when known, or unresolved-identifier fixes otherwise.
 * No edits are recorded when no import fix is found.
 */
function pasteEdits(
    targetFile: SourceFile,
    pastedText: string[],
    pasteLocations: TextRange[],
    copiedFrom: CopiedFromInfo | undefined,
    host: LanguageServiceHost,
    preferences: UserPreferences,
    formatContext: formatting.FormatContext,
    cancellationToken: CancellationToken,
    changes: textChanges.ChangeTracker,
) {
    // When counts differ, the same text is pasted at every location (joined
    // with newlines if the clipboard held multiple fragments).
    let actualPastedText: string | undefined;
    if (pastedText.length !== pasteLocations.length) {
        actualPastedText = pastedText.length === 1 ? pastedText[0] : pastedText.join(getNewLineOrDefaultFromHost(formatContext.host, formatContext.options));
    }
    const statements: Statement[] = [];
    let newText = targetFile.text;
    // Splice back-to-front so earlier positions are not shifted by later edits.
    for (let i = pasteLocations.length - 1; i >= 0; i--) {
        const { pos, end } = pasteLocations[i];
        newText = actualPastedText ? newText.slice(0, pos) + actualPastedText + newText.slice(end) : newText.slice(0, pos) + pastedText[i] + newText.slice(end);
    }
    let importAdder: codefix.ImportAdder;
    // Type-check against a temporary version of the target file that already
    // contains the pasted text.
    Debug.checkDefined(host.runWithTemporaryFileUpdate).call(host, targetFile.fileName, newText, (updatedProgram: Program, originalProgram: Program | undefined, updatedFile: SourceFile) => {
        importAdder = codefix.createImportAdder(updatedFile, updatedProgram, preferences, host);
        if (copiedFrom?.range) {
            Debug.assert(copiedFrom.range.length === pastedText.length);
            // Collect the source-file statements overlapped by each copied range.
            copiedFrom.range.forEach(copy => {
                const statementsInSourceFile = copiedFrom.file.statements;
                const startNodeIndex = findIndex(statementsInSourceFile, s => s.end > copy.pos);
                if (startNodeIndex === -1) return undefined;
                let endNodeIndex = findIndex(statementsInSourceFile, s => s.end >= copy.end, startNodeIndex);
                /**
                 * [|console.log(a);
                 * |]
                 * console.log(b);
                 */
                if (endNodeIndex !== -1 && copy.end <= statementsInSourceFile[endNodeIndex].getStart()) {
                    endNodeIndex--;
                }
                statements.push(...statementsInSourceFile.slice(startNodeIndex, endNodeIndex === -1 ? statementsInSourceFile.length : endNodeIndex + 1));
            });
            Debug.assertIsDefined(originalProgram, "no original program found");
            const originalProgramTypeChecker = originalProgram.getTypeChecker();
            const usageInfoRange = getUsageInfoRangeForPasteEdits(copiedFrom);
            const usage = getUsageInfo(copiedFrom.file, statements, originalProgramTypeChecker, getExistingLocals(updatedFile, statements, originalProgramTypeChecker), usageInfoRange);
            const useEsModuleSyntax = !fileShouldUseJavaScriptRequire(targetFile.fileName, originalProgram, host, !!copiedFrom.file.commonJsModuleIndicator);
            // Export anything the pasted code needs from the old file, then
            // import those (and its old imports) into the target file.
            addExportsInOldFile(copiedFrom.file, usage.targetFileImportsFromOldFile, changes, useEsModuleSyntax);
            addTargetFileImports(copiedFrom.file, usage.oldImportsNeededByTargetFile, usage.targetFileImportsFromOldFile, originalProgramTypeChecker, updatedProgram, importAdder);
        }
        else {
            const context: CodeFixContextBase = {
                sourceFile: updatedFile,
                program: originalProgram!,
                cancellationToken,
                host,
                preferences,
                formatContext,
            };
            // `updatedRanges` represent the new ranges that account for the offset changes caused by pasting new text and
            // `offset` represents by how much the starting position of `pasteLocations` needs to be changed.
            //
            // We iterate over each updated range to get the node that wholly encloses the updated range.
            // For each child of that node, we checked for unresolved identifiers
            // within the updated range and try importing it.
            let offset = 0;
            pasteLocations.forEach((location, i) => {
                const oldTextLength = location.end - location.pos;
                const textToBePasted = actualPastedText ?? pastedText[i];
                const startPos = location.pos + offset;
                const endPos = startPos + textToBePasted.length;
                const range: TextRange = { pos: startPos, end: endPos };
                offset += textToBePasted.length - oldTextLength;
                const enclosingNode = findAncestor(
                    getTokenAtPosition(context.sourceFile, range.pos),
                    ancestorNode => rangeContainsRange(ancestorNode, range),
                );
                if (!enclosingNode) return;
                forEachChild(enclosingNode, function importUnresolvedIdentifiers(node) {
                    // Only identifiers inside the pasted range that the checker
                    // cannot resolve are candidates for an auto-import.
                    const isImportCandidate = isIdentifier(node) &&
                        rangeContainsPosition(range, node.getStart(updatedFile)) &&
                        !updatedProgram?.getTypeChecker().resolveName(
                            node.text,
                            node,
                            SymbolFlags.All,
                            /*excludeGlobals*/ false,
                        );
                    if (isImportCandidate) {
                        return importAdder.addImportForUnresolvedIdentifier(
                            context,
                            node,
                            /*useAutoImportProvider*/ true,
                        );
                    }
                    node.forEachChild(importUnresolvedIdentifiers);
                });
            });
        }
        importAdder.writeFixes(changes, getQuotePreference(copiedFrom ? copiedFrom.file : targetFile, preferences));
    });
    /**
     * If there are no import fixes, getPasteEdits should return without making any changes to the file.
     */
    if (!importAdder!.hasFixes()) {
        return;
    }
    // Finally record the actual text replacements for each paste location.
    pasteLocations.forEach((paste, i) => {
        changes.replaceRangeWithText(
            targetFile,
            { pos: paste.pos, end: paste.end },
            actualPastedText ?? pastedText[i],
        );
    });
}
/**
* Adjusts the range for `getUsageInfo` to correctly include identifiers at the edges of the copied text.
*/
function getUsageInfoRangeForPasteEdits({ file: sourceFile, range }: CopiedFromInfo) {
const pos = range[0].pos;
const end = range[range.length - 1].end;
const startToken = getTokenAtPosition(sourceFile, pos);
const endToken = findTokenOnLeftOfPosition(sourceFile, pos) ?? getTokenAtPosition(sourceFile, end);
// Since the range is only used to check identifiers, we do not need to adjust range when the tokens at the edges are not identifiers.
return {
pos: isIdentifier(startToken) && pos <= startToken.getStart(sourceFile) ? startToken.getFullStart() : pos,
end: isIdentifier(endToken) && end === endToken.getEnd() ? textChanges.getAdjustedEndPosition(sourceFile, endToken, {}) : end,
};
} | typescript | github | https://github.com/microsoft/TypeScript | src/services/pasteEdits.ts |
"""The tests for the Owntracks device tracker."""
import json
import os
import unittest
from collections import defaultdict
from homeassistant.components import device_tracker
from homeassistant.const import (STATE_NOT_HOME, CONF_PLATFORM)
import homeassistant.components.device_tracker.owntracks as owntracks
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
# Identity of the simulated OwnTracks user/device and the MQTT topics the
# component listens on for location and region-transition messages.
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = "owntracks/{}/{}".format(USER, DEVICE)
EVENT_TOPIC = "owntracks/{}/{}/event".format(USER, DEVICE)
DEVICE_TRACKER_STATE = "device_tracker.{}_{}".format(USER, DEVICE)
# Mobile iBeacon tracked as its own device_tracker entity.
IBEACON_DEVICE = 'keys'
REGION_TRACKER_STATE = "device_tracker.beacon_{}".format(IBEACON_DEVICE)
# Platform option name for the GPS accuracy filter used in setup below.
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
# Plain OwnTracks location update at lat/lon 2.0/1.0 with 60 m accuracy
# (inside the 'outer' zone set up by the tests).
LOCATION_MESSAGE = {
    'batt': 92,
    'cog': 248,
    'tid': 'user',
    'lon': 1.0,
    't': 'u',
    'alt': 27,
    'acc': 60,
    'p': 101.3977584838867,
    'vac': 4,
    'lat': 2.0,
    '_type': 'location',
    'tst': 1,
    'vel': 0}
# Location update whose accuracy (2000 m) exceeds the configured
# max_gps_accuracy of 200, so it should be discarded.
LOCATION_MESSAGE_INACCURATE = {
    'batt': 92,
    'cog': 248,
    'tid': 'user',
    'lon': 2.0,
    't': 'u',
    'alt': 27,
    'acc': 2000,
    'p': 101.3977584838867,
    'vac': 4,
    'lat': 6.0,
    '_type': 'location',
    'tst': 1,
    'vel': 0}
# Region-transition event: entering the 'inner' zone.
REGION_ENTER_MESSAGE = {
    'lon': 1.0,
    'event': 'enter',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    't': 'b',
    'acc': 60,
    'tst': 2,
    'lat': 2.0,
    '_type': 'transition'}
# Region-transition event: leaving the 'inner' zone with usable accuracy.
REGION_LEAVE_MESSAGE = {
    'lon': 1.0,
    'event': 'leave',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    't': 'b',
    'acc': 60,
    'tst': 2,
    'lat': 2.0,
    '_type': 'transition'}
# Leave event with accuracy 2000 m: GPS data should be ignored, but the
# region bookkeeping should still record the exit.
REGION_LEAVE_INACCURATE_MESSAGE = {
    'lon': 10.0,
    'event': 'leave',
    'tid': 'user',
    'desc': 'inner',
    'wtst': 1,
    't': 'b',
    'acc': 2000,
    'tst': 2,
    'lat': 20.0,
    '_type': 'transition'}
class TestDeviceTrackerOwnTracks(unittest.TestCase):
    """Test the OwnTrack sensor.

    Sets up four zones (inner, inner_2, outer, passive) and exercises
    location updates, region enter/leave events and mobile-beacon tracking.
    """

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        mock_mqtt_component(self.hass)
        self.assertTrue(device_tracker.setup(self.hass, {
            device_tracker.DOMAIN: {
                CONF_PLATFORM: 'owntracks',
                CONF_MAX_GPS_ACCURACY: 200
            }}))
        self.hass.states.set(
            'zone.inner', 'zoning',
            {
                'name': 'zone',
                'latitude': 2.1,
                'longitude': 1.1,
                'radius': 10
            })
        self.hass.states.set(
            'zone.inner_2', 'zoning',
            {
                'name': 'zone',
                'latitude': 2.1,
                'longitude': 1.1,
                'radius': 10
            })
        self.hass.states.set(
            'zone.outer', 'zoning',
            {
                'name': 'zone',
                'latitude': 2.0,
                'longitude': 1.0,
                'radius': 100000
            })
        self.hass.states.set(
            'zone.passive', 'zoning',
            {
                'name': 'zone',
                'latitude': 3.0,
                'longitude': 1.0,
                'radius': 10,
                'passive': True
            })
        # Clear state between tests
        self.hass.states.set(DEVICE_TRACKER_STATE, None)
        owntracks.REGIONS_ENTERED = defaultdict(list)
        owntracks.MOBILE_BEACONS_ACTIVE = defaultdict(list)

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
        try:
            os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
        except FileNotFoundError:
            pass

    def send_message(self, topic, message):
        """Test the sending of a message."""
        fire_mqtt_message(
            self.hass, topic, json.dumps(message))
        self.hass.pool.block_till_done()

    def assert_location_state(self, location):
        """Test the assertion of a location state."""
        state = self.hass.states.get(DEVICE_TRACKER_STATE)
        self.assertEqual(state.state, location)

    def assert_location_latitude(self, latitude):
        """Test the assertion of a location latitude."""
        state = self.hass.states.get(DEVICE_TRACKER_STATE)
        self.assertEqual(state.attributes.get('latitude'), latitude)

    def assert_location_longitude(self, longitude):
        """Test the assertion of a location longitude."""
        state = self.hass.states.get(DEVICE_TRACKER_STATE)
        self.assertEqual(state.attributes.get('longitude'), longitude)

    def assert_location_accuracy(self, accuracy):
        """Test the assertion of a location accuracy."""
        state = self.hass.states.get(DEVICE_TRACKER_STATE)
        self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)

    def assert_tracker_state(self, location):
        """Test the assertion of a tracker state."""
        state = self.hass.states.get(REGION_TRACKER_STATE)
        self.assertEqual(state.state, location)

    def assert_tracker_latitude(self, latitude):
        """Test the assertion of a tracker latitude."""
        state = self.hass.states.get(REGION_TRACKER_STATE)
        self.assertEqual(state.attributes.get('latitude'), latitude)

    def assert_tracker_accuracy(self, accuracy):
        """Test the assertion of a tracker accuracy."""
        state = self.hass.states.get(REGION_TRACKER_STATE)
        self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)

    def test_location_update(self):
        """Test the update of a location."""
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
        self.assert_location_latitude(2.0)
        self.assert_location_accuracy(60.0)
        self.assert_location_state('outer')

    def test_location_inaccurate_gps(self):
        """Test the location for inaccurate GPS information."""
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
        self.assert_location_latitude(2.0)
        self.assert_location_longitude(1.0)

    def test_event_entry_exit(self):
        """Test the entry event."""
        self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
        # Enter uses the zone's gps co-ords
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        self.assert_location_state('inner')
        self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
        # Updates ignored when in a zone
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        self.assert_location_state('inner')
        self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
        # Exit switches back to GPS
        self.assert_location_latitude(2.0)
        self.assert_location_accuracy(60.0)
        self.assert_location_state('outer')
        # Left clean zone state
        self.assertFalse(owntracks.REGIONS_ENTERED[USER])

    def test_event_entry_exit_inaccurate(self):
        """Test the event for inaccurate exit."""
        self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
        # Enter uses the zone's gps co-ords
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        self.assert_location_state('inner')
        self.send_message(EVENT_TOPIC, REGION_LEAVE_INACCURATE_MESSAGE)
        # Exit doesn't use inaccurate gps
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        self.assert_location_state('inner')
        # But does exit region correctly
        self.assertFalse(owntracks.REGIONS_ENTERED[USER])

    def test_event_exit_outside_zone_sets_away(self):
        """Test the event for exit zone."""
        self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
        self.assert_location_state('inner')
        # Exit message far away GPS location
        message = REGION_LEAVE_MESSAGE.copy()
        message['lon'] = 90.1
        message['lat'] = 90.1
        self.send_message(EVENT_TOPIC, message)
        # Exit forces zone change to away
        self.assert_location_state(STATE_NOT_HOME)

    def test_event_entry_exit_right_order(self):
        """Test the event for ordering."""
        # Enter inner zone
        self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
        self.assert_location_state('inner')
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        # Enter inner2 zone
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('inner_2')
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        # Exit inner_2 - should be in 'inner'
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('inner')
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        # Exit inner - should be in 'outer'
        self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
        self.assert_location_state('outer')
        self.assert_location_latitude(2.0)
        self.assert_location_accuracy(60.0)

    def test_event_entry_exit_wrong_order(self):
        """Test the event for wrong order."""
        # Enter inner zone
        self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
        self.assert_location_state('inner')
        # Enter inner2 zone
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('inner_2')
        # Exit inner - should still be in 'inner_2'
        self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
        self.assert_location_state('inner_2')
        # Exit inner_2 - should be in 'outer'
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('outer')

    def test_event_entry_exit_passive_zone(self):
        """Test the event for passive zone exits."""
        # Enter passive zone
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "passive"
        self.send_message(EVENT_TOPIC, message)
        # Should pick up gps put not zone
        self.assert_location_state('not_home')
        self.assert_location_latitude(3.0)
        self.assert_location_accuracy(10.0)
        # Enter inner2 zone
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('inner_2')
        self.assert_location_latitude(2.1)
        self.assert_location_accuracy(10.0)
        # Exit inner_2 - should be in 'passive'
        # ie gps co-ords - but not zone
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('not_home')
        self.assert_location_latitude(3.0)
        self.assert_location_accuracy(10.0)
        # Exit passive - should be in 'outer'
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "passive"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('outer')
        self.assert_location_latitude(2.0)
        self.assert_location_accuracy(60.0)

    def test_event_entry_unknown_zone(self):
        """Test the event for unknown zone."""
        # Just treat as location update
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "unknown"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_latitude(2.0)
        self.assert_location_state('outer')

    def test_event_exit_unknown_zone(self):
        """Test the event for unknown zone."""
        # Just treat as location update
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "unknown"
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_latitude(2.0)
        self.assert_location_state('outer')

    def test_event_entry_zone_loading_dash(self):
        """Test the event for zone with a leading dash."""
        # Make sure the leading - is ignored
        # Owntracks uses this to switch on hold
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "-inner"
        # BUGFIX: previously the unmodified REGION_ENTER_MESSAGE was sent,
        # so the leading-dash handling was never actually exercised.
        self.send_message(EVENT_TOPIC, message)
        self.assert_location_state('inner')

    def test_mobile_enter_move_beacon(self):
        """Test the movement of a beacon."""
        # Enter mobile beacon, should set location
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = IBEACON_DEVICE
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(2.0)
        self.assert_tracker_state('outer')
        # Move should move beacon
        message = LOCATION_MESSAGE.copy()
        message['lat'] = "3.0"
        self.send_message(LOCATION_TOPIC, message)
        self.assert_tracker_latitude(3.0)
        self.assert_tracker_state(STATE_NOT_HOME)

    def test_mobile_enter_exit_region_beacon(self):
        """Test the enter and the exit of a region beacon."""
        # Start tracking beacon
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = IBEACON_DEVICE
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(2.0)
        self.assert_tracker_state('outer')
        # Enter location should move beacon
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(2.1)
        self.assert_tracker_state('inner_2')
        # Exit location should switch to gps
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = "inner_2"
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(2.0)

    def test_mobile_exit_move_beacon(self):
        """Test the exit move of a beacon."""
        # Start tracking beacon
        message = REGION_ENTER_MESSAGE.copy()
        message['desc'] = IBEACON_DEVICE
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(2.0)
        self.assert_tracker_state('outer')
        # Exit mobile beacon, should set location
        message = REGION_LEAVE_MESSAGE.copy()
        message['desc'] = IBEACON_DEVICE
        message['lat'] = "3.0"
        self.send_message(EVENT_TOPIC, message)
        self.assert_tracker_latitude(3.0)
        # Move after exit should do nothing
        message = LOCATION_MESSAGE.copy()
        message['lat'] = "4.0"
        # BUGFIX: previously the unmodified LOCATION_MESSAGE was sent here,
        # so the "moved after exit" case was never actually exercised.
        self.send_message(LOCATION_TOPIC, message)
        self.assert_tracker_latitude(3.0)

    def test_mobile_multiple_async_enter_exit(self):
        """Test the multiple entering."""
        # Test race condition
        enter_message = REGION_ENTER_MESSAGE.copy()
        enter_message['desc'] = IBEACON_DEVICE
        exit_message = REGION_LEAVE_MESSAGE.copy()
        exit_message['desc'] = IBEACON_DEVICE
        for i in range(0, 20):
            fire_mqtt_message(
                self.hass, EVENT_TOPIC, json.dumps(enter_message))
            fire_mqtt_message(
                self.hass, EVENT_TOPIC, json.dumps(exit_message))
        fire_mqtt_message(
            self.hass, EVENT_TOPIC, json.dumps(enter_message))
        self.hass.pool.block_till_done()
        self.send_message(EVENT_TOPIC, exit_message)
        self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])

    def test_mobile_multiple_enter_exit(self):
        """Test the multiple entering."""
        # Should only happen if the iphone dies
        enter_message = REGION_ENTER_MESSAGE.copy()
        enter_message['desc'] = IBEACON_DEVICE
        exit_message = REGION_LEAVE_MESSAGE.copy()
        exit_message['desc'] = IBEACON_DEVICE
        self.send_message(EVENT_TOPIC, enter_message)
        self.send_message(EVENT_TOPIC, enter_message)
        self.send_message(EVENT_TOPIC, exit_message)
        self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,gcc-apq8064.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller on APQ8064/MSM8960
maintainers:
  - Stephen Boyd <sboyd@kernel.org>
  - Taniya Das <quic_tdas@quicinc.com>
description: |
  Qualcomm global clock control module provides the clocks, resets and power
  domains on APQ8064.
  See also::
    include/dt-bindings/clock/qcom,gcc-msm8960.h
    include/dt-bindings/reset/qcom,gcc-msm8960.h
allOf:
  - $ref: qcom,gcc.yaml#
properties:
  compatible:
    oneOf:
      # Preferred form: the "syscon" entry exposes the register space to
      # child nodes such as the thermal sensor below.
      - items:
          - enum:
              - qcom,gcc-apq8064
              - qcom,gcc-msm8960
          - const: syscon
      - enum:
          - qcom,gcc-apq8064
          - qcom,gcc-msm8960
        deprecated: true
  thermal-sensor:
    description: child tsens device
    $ref: /schemas/thermal/qcom-tsens.yaml#
  clocks:
    maxItems: 3
  clock-names:
    items:
      - const: cxo
      - const: pxo
      - const: pll4
  nvmem-cells:
    minItems: 1
    maxItems: 2
    deprecated: true
    description:
      Qualcomm TSENS (thermal sensor device) on some devices can
      be part of GCC and hence the TSENS properties can also be part
      of the GCC/clock-controller node.
      For more details on the TSENS properties please refer
      Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
  nvmem-cell-names:
    minItems: 1
    deprecated: true
    items:
      - const: calib
      - const: calib_backup
  '#thermal-sensor-cells':
    const: 1
    deprecated: true
  '#power-domain-cells': false
required:
  - compatible
unevaluatedProperties: false
examples:
  - |
    clock-controller@900000 {
      compatible = "qcom,gcc-apq8064", "syscon";
      reg = <0x00900000 0x4000>;
      #clock-cells = <1>;
      #reset-cells = <1>;
      thermal-sensor {
        compatible = "qcom,msm8960-tsens";
        nvmem-cells = <&tsens_calib>, <&tsens_backup>;
        nvmem-cell-names = "calib", "calib_backup";
        interrupts = <0 178 4>;
        interrupt-names = "uplow";
        #qcom,sensors = <11>;
        #thermal-sensor-cells = <1>;
      };
    };
...
# Explicit imports: the original relied on `os` and `sys` being present in
# the namespace lit happens to exec this config in; import them so the
# config does not depend on lit internals.
import os
import platform
import sys

import lit.formats

config.name = "clangIncludeCleaner Unit Tests"
config.test_format = lit.formats.GoogleTest(".", "Tests")
config.test_source_root = config.clang_include_cleaner_binary_dir + "/unittests"
config.test_exec_root = config.clang_include_cleaner_binary_dir + "/unittests"

# Point the dynamic loader at dynamic libraries in 'lib'.
# FIXME: it seems every project has a copy of this logic. Move it somewhere.
if platform.system() == "Darwin":
    shlibpath_var = "DYLD_LIBRARY_PATH"
elif platform.system() == "Windows" or sys.platform == "cygwin":
    shlibpath_var = "PATH"
else:
    shlibpath_var = "LD_LIBRARY_PATH"
config.environment[shlibpath_var] = os.path.pathsep.join(
    (config.shlibdir, config.llvm_libs_dir, config.environment.get(shlibpath_var, ""))
)

# It is not realistically possible to account for all options that could
# possibly be present in system and user configuration files, so disable
# default configs for the test runs.
config.environment["CLANG_NO_DEFAULT_CONFIG"] = "1"
def _dots(cell):
    """Render a cell as an explicit '[d..]' dot list: bit *n* -> dot *n+1*."""
    return '[d' + ''.join(str(dot + 1) for dot in range(8) if (cell >> dot) & 1) + ']'


# Braille translation table: index = 8-bit cell pattern, value = rendering.
# Patterns 0x00-0x3f (dots 7 and 8 clear) map to literal characters; every
# pattern with dot 7 or dot 8 set is rendered as an explicit dot list, which
# follows directly from the pattern's set bits and is generated here instead
# of being spelled out as 192 literals.
data = (
    ' ', 'a', '1', 'b', '\'', 'k', '2', 'l',    # 0x00-0x07
    '@', 'c', 'i', 'f', '/', 'm', 's', 'p',     # 0x08-0x0f
    '"', 'e', '3', 'h', '9', 'o', '6', 'r',     # 0x10-0x17
    '^', 'd', 'j', 'g', '>', 'n', 't', 'q',     # 0x18-0x1f
    ',', '*', '5', '<', '-', 'u', '8', 'v',     # 0x20-0x27
    '.', '%', '[', '$', '+', 'x', '!', '&',     # 0x28-0x2f
    ';', ':', '4', '\\', '0', 'z', '7', '(',    # 0x30-0x37
    '_', '?', 'w', ']', '#', 'y', ')', '=',     # 0x38-0x3f
) + tuple(_dots(cell) for cell in range(0x40, 0x100))
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package awsauth
import (
"context"
"fmt"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/authmetadata"
"github.com/hashicorp/vault/sdk/logical"
)
var (
	// iamAuthMetadataFields describes the auth metadata that can be attached
	// to tokens issued from IAM logins. The default alias type used by this
	// back-end is the role ID, so the default fields are ones expected to
	// have a low rate of change while the role ID is in use.
	iamAuthMetadataFields = &authmetadata.Fields{
		// FieldName is the API parameter name exposed on config/identity.
		FieldName: "iam_metadata",
		Default: []string{
			"account_id",
			"auth_type",
		},
		// Fields an operator may opt into; these can change more often per login.
		AvailableToAdd: []string{
			"canonical_arn",
			"client_arn",
			"client_user_id",
			"inferred_aws_region",
			"inferred_entity_id",
			"inferred_entity_type",
			"inferred_hostname",
		},
	}
	// ec2AuthMetadataFields describes the auth metadata that can be attached
	// to tokens issued from EC2 logins. As above, the defaults are chosen to
	// be stable while the role ID is in use.
	ec2AuthMetadataFields = &authmetadata.Fields{
		// FieldName is the API parameter name exposed on config/identity.
		FieldName: "ec2_metadata",
		Default: []string{
			"account_id",
			"auth_type",
		},
		AvailableToAdd: []string{
			"ami_id",
			"instance_id",
			"region",
		},
	}
)
// pathConfigIdentity returns the config/identity endpoint, which controls how
// the AWS auth method builds entity aliases for IAM and EC2 logins and which
// auth metadata fields it attaches to tokens.
func (b *backend) pathConfigIdentity() *framework.Path {
	return &framework.Path{
		Pattern: "config/identity$",
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},
		Fields: map[string]*framework.FieldSchema{
			"iam_alias": {
				Type: framework.TypeString,
				// NOTE(review): the schema default here is identityAliasIAMUniqueID,
				// but both the description below and identityConfigEntry treat
				// identityAliasRoleID as the effective default — confirm which is
				// intended. The update handler uses GetOk, so this Default is not
				// applied on writes.
				Default:     identityAliasIAMUniqueID,
				Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, %q and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn, identityAliasRoleID),
			},
			// Operator-selectable IAM metadata fields (see iamAuthMetadataFields).
			iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields),
			"ec2_alias": {
				Type:        framework.TypeString,
				Default:     identityAliasEC2InstanceID,
				Description: fmt.Sprintf("Configure how the AWS auth method generates entity alias when using EC2 auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID, identityAliasRoleID),
			},
			// Operator-selectable EC2 metadata fields (see ec2AuthMetadataFields).
			ec2AuthMetadataFields.FieldName: authmetadata.FieldSchema(ec2AuthMetadataFields),
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: pathConfigIdentityRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "identity-integration-configuration",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: pathConfigIdentityUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "identity-integration",
				},
			},
		},
		HelpSynopsis:    pathConfigIdentityHelpSyn,
		HelpDescription: pathConfigIdentityHelpDesc,
	}
}
// identityConfigEntry loads the stored config/identity entry. Missing values
// are filled in with fresh auth-metadata handlers and role-ID aliasing.
func identityConfigEntry(ctx context.Context, s logical.Storage) (*identityConfig, error) {
	raw, err := s.Get(ctx, "config/identity")
	if err != nil {
		return nil, err
	}
	cfg := &identityConfig{
		IAMAuthMetadataHandler: authmetadata.NewHandler(iamAuthMetadataFields),
		EC2AuthMetadataHandler: authmetadata.NewHandler(ec2AuthMetadataFields),
	}
	if raw != nil {
		if err := raw.DecodeJSON(cfg); err != nil {
			return nil, err
		}
	}
	// Both alias modes default to the role ID when nothing was configured.
	if cfg.IAMAlias == "" {
		cfg.IAMAlias = identityAliasRoleID
	}
	if cfg.EC2Alias == "" {
		cfg.EC2Alias = identityAliasRoleID
	}
	return cfg, nil
}
// pathConfigIdentityRead reports the current identity-integration settings:
// the alias mode and selected auth metadata fields for IAM and EC2 logins.
func pathConfigIdentityRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
	cfg, err := identityConfigEntry(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	respData := map[string]interface{}{
		"iam_alias":                     cfg.IAMAlias,
		iamAuthMetadataFields.FieldName: cfg.IAMAuthMetadataHandler.AuthMetadata(),
		"ec2_alias":                     cfg.EC2Alias,
		ec2AuthMetadataFields.FieldName: cfg.EC2AuthMetadataHandler.AuthMetadata(),
	}
	return &logical.Response{Data: respData}, nil
}
// pathConfigIdentityUpdate validates and persists identity-integration
// settings. Invalid alias values return a user error; storage failures are
// returned as internal errors.
func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	cfg, err := identityConfigEntry(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	if raw, ok := data.GetOk("iam_alias"); ok {
		alias := raw.(string)
		allowed := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn}
		if !strutil.StrListContains(allowed, alias) {
			return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", alias, allowed)), nil
		}
		cfg.IAMAlias = alias
	}
	if raw, ok := data.GetOk("ec2_alias"); ok {
		alias := raw.(string)
		allowed := []string{identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID}
		if !strutil.StrListContains(allowed, alias) {
			return logical.ErrorResponse(fmt.Sprintf("ec2_alias of %q not in set of allowed values: %v", alias, allowed)), nil
		}
		cfg.EC2Alias = alias
	}
	// Metadata field selections are parsed by the handlers themselves; a parse
	// failure is a client error.
	if err := cfg.IAMAuthMetadataHandler.ParseAuthMetadata(data); err != nil {
		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
	}
	if err := cfg.EC2AuthMetadataHandler.ParseAuthMetadata(data); err != nil {
		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
	}
	entry, err := logical.StorageEntryJSON("config/identity", cfg)
	if err != nil {
		return nil, err
	}
	if err := req.Storage.Put(ctx, entry); err != nil {
		return nil, err
	}
	return nil, nil
}
// identityConfig is the stored (JSON-encoded) form of the config/identity entry.
type identityConfig struct {
	// IAMAlias selects how entity aliases are generated for IAM logins.
	IAMAlias string `json:"iam_alias"`
	// IAMAuthMetadataHandler tracks which IAM metadata fields are attached to tokens.
	IAMAuthMetadataHandler *authmetadata.Handler `json:"iam_auth_metadata_handler"`
	// EC2Alias selects how entity aliases are generated for EC2 logins.
	EC2Alias string `json:"ec2_alias"`
	// EC2AuthMetadataHandler tracks which EC2 metadata fields are attached to tokens.
	EC2AuthMetadataHandler *authmetadata.Handler `json:"ec2_auth_metadata_handler"`
}
// Accepted values for the iam_alias and ec2_alias configuration parameters.
const (
	identityAliasIAMUniqueID     = "unique_id"
	identityAliasIAMFullArn      = "full_arn"
	identityAliasIAMCanonicalArn = "canonical_arn"
	identityAliasEC2InstanceID   = "instance_id"
	identityAliasEC2ImageID      = "image_id"
	// identityAliasRoleID is the effective default for both alias modes
	// (applied by identityConfigEntry when no value is stored).
	identityAliasRoleID = "role_id"
)
const pathConfigIdentityHelpSyn = `
Configure the way the AWS auth method interacts with the identity store
`
const pathConfigIdentityHelpDesc = `
The AWS auth backend defaults to aliasing an IAM principal's unique ID to the
identity store. This path allows users to change how Vault configures the
mapping to Identity aliases for more flexibility.
You can set the iam_alias parameter to one of the following values:
* 'unique_id': This retains Vault's default behavior
* 'full_arn': This maps the full authenticated ARN to the identity alias, e.g.,
"arn:aws:sts::<account_id>:assumed-role/<role_name>/<role_session_name>
This is useful where you have an identity provder that sets role_session_name
to a known value of a person, such as a username or email address, and allows
you to map those roles back to entries in your identity store.
` | go | github | https://github.com/hashicorp/vault | builtin/credential/aws/path_config_identity.go |
/* Copyright (c) 2002,2003,2005 CrystalClear Software, Inc.
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
* Author: Jeff Garland, Bart Garst
*/
#ifndef DATE_TIME_DATE_DST_TRANSITION_DAY_GEN_HPP__
#define DATE_TIME_DATE_DST_TRANSITION_DAY_GEN_HPP__
#include <string>
namespace boost {
namespace date_time {
//! Defines base interface for calculating start and end date of daylight savings
template<class date_type>
class dst_day_calc_rule
{
 public:
  typedef typename date_type::year_type year_type;
  virtual ~dst_day_calc_rule() {}
  //! Day on which daylight saving starts for year y
  virtual date_type start_day(year_type y) const=0;
  //! Textual description of the start rule
  virtual std::string start_rule_as_string() const=0;
  //! Day on which daylight saving ends for year y
  virtual date_type end_day(year_type y) const=0;
  //! Textual description of the end rule
  virtual std::string end_rule_as_string() const=0;
};
//! Canonical form for a class that provides day rule calculation
/*! This class is used to generate specific sets of dst rules
*
 *@tparam spec Provides a specification of the function object types used
* to generate start and end days of daylight savings as well
* as the date type.
*/
template<class spec>
class day_calc_dst_rule : public dst_day_calc_rule<typename spec::date_type>
{
 public:
  typedef typename spec::date_type date_type;
  typedef typename date_type::year_type year_type;
  typedef typename spec::start_rule start_rule;
  typedef typename spec::end_rule end_rule;
  //! Construct from start/end function objects (copied by value)
  day_calc_dst_rule(start_rule dst_start,
                    end_rule dst_end) :
    dst_start_(dst_start),
    dst_end_(dst_end)
  {}
  //! Delegate start-day calculation to the stored start rule
  virtual date_type start_day(year_type y) const
  {
    return dst_start_.get_date(y);
  }
  virtual std::string start_rule_as_string() const
  {
    return dst_start_.to_string();
  }
  //! Delegate end-day calculation to the stored end rule
  virtual date_type end_day(year_type y) const
  {
    return dst_end_.get_date(y);
  }
  virtual std::string end_rule_as_string() const
  {
    return dst_end_.to_string();
  }
 private:
  start_rule dst_start_;
  end_rule dst_end_;
};
} }//namespace
#endif | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/date_time/dst_transition_generators.hpp |
/*
* Protractor support is deprecated in Angular.
* Protractor is used in this example for compatibility with Angular documentation tools.
*/
import {bootstrapApplication, provideProtractorTestingSupport} from '@angular/platform-browser';
import {App} from './app/app';
import {provideRouter} from '@angular/router';
import routeConfig from './app/routes';
// Bootstrap the standalone root component; router config and Protractor
// support are supplied as application-level providers. Bootstrap failures
// are logged rather than rethrown.
bootstrapApplication(App, {
  providers: [provideProtractorTestingSupport(), provideRouter(routeConfig)],
}).catch((err) => console.error(err));
// Exported helper that always yields the constant 1.
export function f() {
	const value = 1;
	return value;
}
// Exported constant, independent of f.
export const b = 2;
// __webpack_exports_info__ is a webpack compile-time module variable; its
// usedExports property reports which of this module's exports are consumed
// by importers.
export const usedExports = __webpack_exports_info__.usedExports;
#!/usr/bin/python -u
#
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
#
# requests lib http://docs.python-requests.org/en/latest/
#
# html parser: http://www.crummy.com/software/BeautifulSoup/,
# see also http://stackoverflow.com/questions/11709079/parsing-html-python
#
#
import requests
#from BeautifulSoup import BeautifulSoup
from bs4 import BeautifulSoup
# Set security warning to always go off by default.
import warnings
# Directory-name fragments to skip while crawling: data categories this
# script never descends into, regardless of the selected target set.
not_interesting = ['bio', 'pathology_reports', 'diagnostic_images', 'tissue_images',
                   'mirnaseq', 'miRNASeq', 'mirna', 'transcriptome',
                   'methylation', 'protein_exp', 'mutations_protected',
                   'cdna', 'cna', 'bisulfiteseq', 'rnaseqv2','totalrnaseqv2',
                   'microsat_i', 'exon', 'tracerel']
# Which data category to collect: 'mutations', 'cnv', or 'expression'.
target_set = 'mutations'
# this script can be used to locate data files to be downloaded by 201_tcga_http_dnwld.data
############################
# to download rnaseq based expression data use 02_expression/100_rnaseq_http_dwnld.py
############################
# apparently 'transcriptome' is a code word for array based gene expression in TCGA,
# and the method has been somewhat discredited lately
############################
# Extend the skip list with the categories that do not belong to the chosen
# target set (Python 2 script; prints a warning for unknown target sets).
if target_set == 'mutations': # we are looking for somatic mutations
    not_interesting += ['snp', 'rnaseq']
elif target_set == 'cnv':
    not_interesting += ['mutations', 'rnaseq']
elif target_set == 'expression':
    not_interesting += ['mutations', 'snp']
else:
    print "target set", target_set, " not recognized"
##########################################
def get_page_text (httpaddr):
    """Fetch the page at httpaddr over HTTP(S) and return its body as text.

    Security warnings (e.g. unverified TLS) are silenced for the duration
    of the request only.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        response = requests.get(httpaddr)
        return response.text
#########################################
def recursive_descent (name_pieces):
    """Depth-first crawl of the TCGA directory listing.

    name_pieces is the URL path accumulated so far, one piece per directory
    level; it is mutated in place (append before recursing, pop after).
    Matching data-file links are printed as '<relative path>: <file name>'.
    """
    tcga_http = "".join(name_pieces)
    ret_text = get_page_text(tcga_http)
    soup = BeautifulSoup(ret_text)
    #print dir(soup)
    for link in soup.findAll('a'):
        # Skip the listing's back-link and any absolute (off-site) links.
        if 'Parent Directory' in link.getText(): continue
        new_name_piece = link.get('href')
        if 'http' in new_name_piece: continue
        # here we are picking the type of data we are looking for
        # the whole database is such a mess I do not know how to look for
        # the data type I need in a more robust way
        #print new_name_piece[:-1], "not interesting?", new_name_piece[:-1] in not_interesting
        if new_name_piece[:-1] in not_interesting: continue
        # Keep only the archive level relevant to the target set:
        # Level_2 for mutations, Level_3 otherwise.
        if target_set == 'mutations':
            if 'Level_1' in new_name_piece: continue
            if 'Level_3' in new_name_piece: continue
        else:
            if 'Level_1' in new_name_piece: continue
            if 'Level_2' in new_name_piece: continue
        # A piece not ending in '/' is a file: report it if it matches,
        # then continue without recursing.
        if new_name_piece[-1] != '/' :
            if target_set == 'expression':
                if 'trimmed.annotated.gene.quantification.txt' in new_name_piece:
                    print "".join(name_pieces[1:]), ": ",
                    print new_name_piece
            elif 'tar.gz' in new_name_piece[-6:] and not 'mage-tab' in new_name_piece:
                print "".join(name_pieces[1:]), ": ",
                print new_name_piece
            continue
        # Directory: descend one level, then restore the shared path list.
        name_pieces.append(new_name_piece)
        recursive_descent (name_pieces)
        name_pieces.pop()
    return
#########################################
def main():
    """Crawl the TCGA open-access archive starting from the tumor root URL."""
    root_url = 'https://tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/'
    # The recursion is seeded with a single-element path list holding the root.
    recursive_descent([root_url])
#########################################
# Run the crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.inventory import Inventory
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
    """Unit tests for host-pattern splitting, subscripts, and range expansion."""

    # Map of pattern string -> expected result of split_host_pattern.
    # Covers comma and colon separators, whitespace, IPv6 addresses
    # (whose '::' must not be treated as a separator), and subscripts.
    patterns = {
        'a': ['a'],
        'a, b': ['a', 'b'],
        'a , b': ['a', 'b'],
        ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
        '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo'],
        'foo[1:2]': ['foo[1:2]'],
        'a::b': ['a::b'],
        'a:b': ['a', 'b'],
        ' a : b ': ['a', 'b'],
        'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
    }

    # Pairs of [input list, expected result] for split_host_pattern on lists.
    pattern_lists = [
        [['a'], ['a']],
        [['a', 'b'], ['a', 'b']],
        [['a, b'], ['a', 'b']],
        [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
         ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo']]
    ]

    # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
    # a,b are the bounds of the subscript; x..z are the results of the subscript
    # when applied to string.ascii_letters.
    subscripts = {
        'a': [('a',None), list(string.ascii_letters)],
        'a[0]': [('a', (0, None)), ['a']],
        'a[1]': [('a', (1, None)), ['b']],
        'a[2:3]': [('a', (2, 3)), ['c', 'd']],
        'a[-1]': [('a', (-1, None)), ['Z']],
        'a[-2]': [('a', (-2, None)), ['Y']],
        'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
        'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
        'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
    }

    # Hostname-range pattern -> expected expansion; covers numeric and
    # alphabetic ranges, strides, and multiple ranges per pattern.
    ranges_to_expand = {
        'a[1:2]': ['a1', 'a2'],
        'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
        'a[a:b]': ['aa', 'ab'],
        'a[a:i:3]': ['aa', 'ad', 'ag'],
        'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
        'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
        'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
    }

    def setUp(self):
        """Build an Inventory over an empty host list with a fake loader."""
        v = VariableManager()
        fake_loader = DictDataLoader({})
        self.i = Inventory(loader=fake_loader, variable_manager=v, host_list='')

    def test_split_patterns(self):
        """split_host_pattern handles both string and list inputs."""
        for p in self.patterns:
            r = self.patterns[p]
            self.assertEqual(r, self.i.split_host_pattern(p))
        for p, r in self.pattern_lists:
            self.assertEqual(r, self.i.split_host_pattern(p))

    def test_ranges(self):
        """_split_subscript parses bounds; _apply_subscript slices correctly."""
        for s in self.subscripts:
            r = self.subscripts[s]
            self.assertEqual(r[0], self.i._split_subscript(s))
            self.assertEqual(
                r[1],
                self.i._apply_subscript(
                    list(string.ascii_letters),
                    r[0][1]
                )
            )

    def test_expand_hostname_range(self):
        """expand_hostname_range produces the full cartesian expansion."""
        for e in self.ranges_to_expand:
            r = self.ranges_to_expand[e]
            self.assertEqual(r, expand_hostname_range(e))
class InventoryDefaultGroup(unittest.TestCase):
    """Tests for the implicit 'all' and 'ungrouped' groups of an inventory."""

    def test_empty_inventory(self):
        """An empty inventory still defines 'all' and 'ungrouped', both empty."""
        inventory = self._get_inventory('')
        self.assertIn('all', inventory.groups)
        self.assertIn('ungrouped', inventory.groups)
        self.assertFalse(inventory.groups['all'].get_hosts())
        self.assertFalse(inventory.groups['ungrouped'].get_hosts())

    def test_ini(self):
        """Hosts listed before any section header land in 'ungrouped'."""
        self._test_default_groups("""
        host1
        host2
        host3
        [servers]
        host3
        host4
        host5
        """)

    def test_ini_explicit_ungrouped(self):
        """An explicit [ungrouped] section behaves like the implicit one."""
        self._test_default_groups("""
        [ungrouped]
        host1
        host2
        host3
        [servers]
        host3
        host4
        host5
        """)

    def _get_inventory(self, inventory_content):
        """Build an Inventory from INI-style content served by a fake loader."""
        v = VariableManager()
        fake_loader = DictDataLoader({
            'hosts': inventory_content
        })
        # basedir is patched because the fake loader has no real file on disk.
        with patch.object(Inventory, 'basedir') as mock_basedir:
            mock_basedir.return_value = './'
            return Inventory(loader=fake_loader, variable_manager=v, host_list='hosts')

    def _test_default_groups(self, inventory_content):
        """Assert 'all', 'ungrouped', and 'servers' contain the expected hosts.

        Note host3 appears in both 'ungrouped' and 'servers': being listed
        before a section does not remove a host from later groups.
        """
        inventory = self._get_inventory(inventory_content)
        self.assertIn('all', inventory.groups)
        self.assertIn('ungrouped', inventory.groups)
        all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
        self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
        ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
        self.assertEqual(set(['host1', 'host2', 'host3']), ungrouped_hosts)
        servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
        self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
"""
====================================
Plotting Cross-Validated Predictions
====================================
This example shows how to use
:func:`~sklearn.model_selection.cross_val_predict` together with
:class:`~sklearn.metrics.PredictionErrorDisplay` to visualize prediction
errors.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# We will load the diabetes dataset and create an instance of a linear
# regression model.
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression

X, y = load_diabetes(return_X_y=True)
lr = LinearRegression()

# %%
# :func:`~sklearn.model_selection.cross_val_predict` returns an array of the
# same size of `y` where each entry is a prediction obtained by cross
# validation.
from sklearn.model_selection import cross_val_predict

# One out-of-fold prediction per sample; y_pred aligns index-for-index with y.
y_pred = cross_val_predict(lr, X, y, cv=10)

# %%
# Since `cv=10`, it means that we trained 10 models and each model was
# used to predict on one of the 10 folds. We can now use the
# :class:`~sklearn.metrics.PredictionErrorDisplay` to visualize the
# prediction errors.
#
# On the left axis, we plot the observed values :math:`y` vs. the predicted
# values :math:`\hat{y}` given by the models. On the right axis, we plot the
# residuals (i.e. the difference between the observed values and the predicted
# values) vs. the predicted values.
import matplotlib.pyplot as plt

from sklearn.metrics import PredictionErrorDisplay

fig, axs = plt.subplots(ncols=2, figsize=(8, 4))
# subsample=100 caps the number of plotted points so the scatter stays legible;
# random_state fixes which points are drawn.
PredictionErrorDisplay.from_predictions(
    y,
    y_pred=y_pred,
    kind="actual_vs_predicted",
    subsample=100,
    ax=axs[0],
    random_state=0,
)
axs[0].set_title("Actual vs. Predicted values")
PredictionErrorDisplay.from_predictions(
    y,
    y_pred=y_pred,
    kind="residual_vs_predicted",
    subsample=100,
    ax=axs[1],
    random_state=0,
)
axs[1].set_title("Residuals vs. Predicted Values")
fig.suptitle("Plotting cross-validated predictions")
plt.tight_layout()
plt.show()
# %%
# It is important to note that we used
# :func:`~sklearn.model_selection.cross_val_predict` for visualization
# purpose only in this example.
#
# It would be problematic to
# quantitatively assess the model performance by computing a single
# performance metric from the concatenated predictions returned by
# :func:`~sklearn.model_selection.cross_val_predict`
# when the different CV folds vary by size and distributions.
#
# It is recommended to compute per-fold performance metrics using:
# :func:`~sklearn.model_selection.cross_val_score` or
# :func:`~sklearn.model_selection.cross_validate` instead. | python | github | https://github.com/scikit-learn/scikit-learn | examples/model_selection/plot_cv_predict.py |
"""
Jacobian matrix operations.
@author: Peter Corke
@copyright: Peter Corke
"""
from numpy import *
from robot.utility import *
from robot.transform import *
from robot.kinematics import *
from numpy.linalg import norm
def jacob0(robot, q):
    """
    Manipulator Jacobian in world (base) coordinates for joint coordinates C{q}.

    Maps differential joint motion to differential Cartesian motion of the
    end-effector expressed in the base frame: M{dX = J dQ}.

    @type robot: Robot
    @type q: vector M{n x 1}
    @param q: joint coordinate
    @rtype: matrix M{6 x n}
    @return: Manipulator Jacobian
    @see: L{jacobn}, L{diff2tr}, L{tr2diff}
    """
    q = mat(q)
    # Jacobian expressed in the wrist (tool) frame.
    Jn = jacobn(robot, q)
    # End-effector rotation relative to the base, from forward kinematics.
    R = t2r(fkine(robot, q))
    Z = zeros((3, 3))
    # Block-diagonal [[R 0],[0 R]] rotates both the translational and the
    # rotational rows of Jn into base coordinates.
    upper = concatenate((R, Z), 1)
    lower = concatenate((Z, R), 1)
    return concatenate((upper, lower)) * Jn
def jacobn(robot, q):
    """
    Compute manipulator Jacobian in tool coordinates for joint coordinates C{q}.
    The manipulator Jacobian matrix maps differential changes in joint space
    to differential Cartesian motion (tool coord frame) of the end-effector.
    M{dX = J dQ}
    Reference
    =========
    Paul, Shimano, Mayer
    Differential Kinematic Control Equations for Simple Manipulators
    IEEE SMC 11(6) 1981
    pp. 456-460
    @type robot: Robot
    @type q: vector M{n x 1}
    @param q: joint coordinate
    @rtype: matrix M{6 x n}
    @return: Manipulator Jacobian
    @see: L{jacob0}, L{diff2tr}, L{tr2diff}
    """
    q = arg2array(q)
    n = robot.n
    L = robot.links
    # J starts as an empty 6 x 0 matrix; columns are prepended per joint.
    J = mat([[],[],[],[],[],[]])
    # U accumulates the transform from joint j's frame to the tool frame,
    # built up while walking from the last joint back toward the base.
    U = robot.tool
    for j in range(n-1,-1,-1):
        if not robot.ismdh(): #standard DH convention
            U = L[j].tr(q[j])*U
        if L[j].sigma == 0: #revolute axis
            # Translational component from the cross-product terms of U.
            d = matrix([[-U[0,0]*U[1,3] + U[1,0]*U[0,3]],\
                [-U[0,1]*U[1,3] + U[1,1]*U[0,3]],\
                [-U[0,2]*U[1,3] + U[1,2]*U[0,3]]])
            delta = U[2,0:3].T # nz oz az
        else: #prismatic axis
            # Prismatic joints translate along the axis, with no rotation.
            d = U[2,0:3].T # nz oz az
            delta = zeros((3,1)) # 0 0 0
        # Prepend this joint's column [d; delta] to the Jacobian.
        J = concatenate((concatenate((d,delta)),J),1)
        if robot.ismdh(): #modified DH convention
            U=L[j].tr(q[j])*U
    return J
def tr2jac(t):
    """
    Compute the 6x6 Jacobian that maps differential motion between frames
    related by the homogeneous transform C{t}.

    @rtype: matrix M{6 x 6}
    @return: Jacobian matrix
    @see: L{tr2diff}, L{diff2tr}
    """
    t = mat(t)
    p = t[0:3, 3]
    rows = []
    # One 3x6 row block per rotation column (the n, o, a axes of t).
    for col in range(3):
        axis = t[0:3, col]
        rows.append(concatenate((axis.T, crossp(p, axis).T), 1))
    # Rotational differentials map through the transposed rotation alone.
    rows.append(concatenate((zeros((3, 3)), t[0:3, 0:3].T), 1))
    return concatenate(rows)
#!/usr/bin/env python
"""Packaging configuration for the fireplace Hearthstone simulator."""
import os.path

import fireplace
from setuptools import find_packages, setup

# PyPI long description comes straight from the README next to this file.
README = open(os.path.join(os.path.dirname(__file__), "README.md")).read()

CLASSIFIERS = [
    "Development Status :: 2 - Pre-Alpha",
    "Intended Audience :: Developers",
    # BUG FIX: the trailing comma here was missing, so Python's implicit
    # string concatenation fused this classifier with the next one into a
    # single bogus "License ...Programming Language :: Python" entry.
    "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Topic :: Games/Entertainment :: Simulation",
]

setup(
    name="fireplace",
    version=fireplace.__version__,
    # BUG FIX: find_packages() expects an iterable of patterns; a bare
    # string was previously iterated character by character.
    packages=find_packages(exclude=["tests", "tests.*"]),
    package_data={"": ["*.xml"]},
    include_package_data=True,
    tests_require=["pytest"],
    author=fireplace.__author__,
    author_email=fireplace.__email__,
    description="Pure-python Hearthstone re-implementation and simulator",
    classifiers=CLASSIFIERS,
    # BUG FIX: previously pointed at the unrelated python-bna repository.
    download_url="https://github.com/jleclanche/fireplace/tarball/master",
    long_description=README,
    license="AGPLv3",
    url="https://github.com/jleclanche/fireplace",
)
---
navigation_title: "Word delimiter graph"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-word-delimiter-graph-tokenfilter.html
---
# Word delimiter graph token filter [analysis-word-delimiter-graph-tokenfilter]
Splits tokens at non-alphanumeric characters. The `word_delimiter_graph` filter also performs optional token normalization based on a set of rules. By default, the filter uses the following rules:
* Split tokens at non-alphanumeric characters. The filter uses these characters as delimiters. For example: `Super-Duper` → `Super`, `Duper`
* Remove leading or trailing delimiters from each token. For example: `XL---42+'Autocoder'` → `XL`, `42`, `Autocoder`
* Split tokens at letter case transitions. For example: `PowerShot` → `Power`, `Shot`
* Split tokens at letter-number transitions. For example: `XL500` → `XL`, `500`
* Remove the English possessive (`'s`) from the end of each token. For example: `Neil's` → `Neil`
The `word_delimiter_graph` filter uses Lucene’s [WordDelimiterGraphFilter](https://lucene.apache.org/core/10_0_0/analysis/common/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.html).
::::{tip}
The `word_delimiter_graph` filter was designed to remove punctuation from complex identifiers, such as product IDs or part numbers. For these use cases, we recommend using the `word_delimiter_graph` filter with the [`keyword`](/reference/text-analysis/analysis-keyword-tokenizer.md) tokenizer.
Avoid using the `word_delimiter_graph` filter to split hyphenated words, such as `wi-fi`. Because users often search for these words both with and without hyphens, we recommend using the [`synonym_graph`](/reference/text-analysis/analysis-synonym-graph-tokenfilter.md) filter instead.
::::
## Example [analysis-word-delimiter-graph-tokenfilter-analyze-ex]
The following [analyze API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze) request uses the `word_delimiter_graph` filter to split `Neil's-Super-Duper-XL500--42+AutoCoder` into normalized tokens using the filter’s default rules:
```console
GET /_analyze
{
"tokenizer": "keyword",
"filter": [ "word_delimiter_graph" ],
"text": "Neil's-Super-Duper-XL500--42+AutoCoder"
}
```
The filter produces the following tokens:
```txt
[ Neil, Super, Duper, XL, 500, 42, Auto, Coder ]
```
## Add to an analyzer [analysis-word-delimiter-graph-tokenfilter-analyzer-ex]
The following [create index API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create) request uses the `word_delimiter_graph` filter to configure a new [custom analyzer](docs-content://manage-data/data-store/text-analysis/create-custom-analyzer.md).
```console
PUT /my-index-000001
{
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "keyword",
"filter": [ "word_delimiter_graph" ]
}
}
}
}
}
```
::::{warning}
Avoid using the `word_delimiter_graph` filter with tokenizers that remove punctuation, such as the [`standard`](/reference/text-analysis/analysis-standard-tokenizer.md) tokenizer. This could prevent the `word_delimiter_graph` filter from splitting tokens correctly. It can also interfere with the filter’s configurable parameters, such as [`catenate_all`](#word-delimiter-graph-tokenfilter-catenate-all) or [`preserve_original`](#word-delimiter-graph-tokenfilter-preserve-original). We recommend using the [`keyword`](/reference/text-analysis/analysis-keyword-tokenizer.md) or [`whitespace`](/reference/text-analysis/analysis-whitespace-tokenizer.md) tokenizer instead.
::::
## Configurable parameters [word-delimiter-graph-tokenfilter-configure-parms]
$$$word-delimiter-graph-tokenfilter-adjust-offsets$$$
`adjust_offsets`
: (Optional, Boolean) If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`.
::::{warning}
Set `adjust_offsets` to `false` if your analyzer uses filters, such as the [`trim`](/reference/text-analysis/analysis-trim-tokenfilter.md) filter, that change the length of tokens without changing their offsets. Otherwise, the `word_delimiter_graph` filter could produce tokens with illegal offsets.
::::
$$$word-delimiter-graph-tokenfilter-catenate-all$$$
`catenate_all`
: (Optional, Boolean) If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. For example: `super-duper-xl-500` → [ **`superduperxl500`**, `super`, `duper`, `xl`, `500` ]. Defaults to `false`.
::::{warning}
Setting this parameter to `true` produces multi-position tokens, which are not supported by indexing.
If this parameter is `true`, avoid using this filter in an index analyzer or use the [`flatten_graph`](/reference/text-analysis/analysis-flatten-graph-tokenfilter.md) filter after this filter to make the token stream suitable for indexing.
When used for search analysis, catenated tokens can cause problems for the [`match_phrase`](/reference/query-languages/query-dsl/query-dsl-match-query-phrase.md) query and other queries that rely on token position for matching. Avoid setting this parameter to `true` if you plan to use these queries.
::::
$$$word-delimiter-graph-tokenfilter-catenate-numbers$$$
`catenate_numbers`
: (Optional, Boolean) If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. For example: `01-02-03` → [ **`010203`**, `01`, `02`, `03` ]. Defaults to `false`.
::::{warning}
Setting this parameter to `true` produces multi-position tokens, which are not supported by indexing.
If this parameter is `true`, avoid using this filter in an index analyzer or use the [`flatten_graph`](/reference/text-analysis/analysis-flatten-graph-tokenfilter.md) filter after this filter to make the token stream suitable for indexing.
When used for search analysis, catenated tokens can cause problems for the [`match_phrase`](/reference/query-languages/query-dsl/query-dsl-match-query-phrase.md) query and other queries that rely on token position for matching. Avoid setting this parameter to `true` if you plan to use these queries.
::::
$$$word-delimiter-graph-tokenfilter-catenate-words$$$
`catenate_words`
: (Optional, Boolean) If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. For example: `super-duper-xl` → [ **`superduperxl`**, `super`, `duper`, `xl` ]. Defaults to `false`.
::::{warning}
Setting this parameter to `true` produces multi-position tokens, which are not supported by indexing.
If this parameter is `true`, avoid using this filter in an index analyzer or use the [`flatten_graph`](/reference/text-analysis/analysis-flatten-graph-tokenfilter.md) filter after this filter to make the token stream suitable for indexing.
When used for search analysis, catenated tokens can cause problems for the [`match_phrase`](/reference/query-languages/query-dsl/query-dsl-match-query-phrase.md) query and other queries that rely on token position for matching. Avoid setting this parameter to `true` if you plan to use these queries.
::::
`generate_number_parts`
: (Optional, Boolean) If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.
`generate_word_parts`
: (Optional, Boolean) If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.
`ignore_keywords`
: (Optional, Boolean) If `true`, the filter skips tokens with a `keyword` attribute of `true`. Defaults to `false`.
$$$word-delimiter-graph-tokenfilter-preserve-original$$$
`preserve_original`
: (Optional, Boolean) If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. For example: `super-duper-xl-500` → [ **`super-duper-xl-500`**, `super`, `duper`, `xl`, `500` ]. Defaults to `false`.
::::{warning}
Setting this parameter to `true` produces multi-position tokens, which are not supported by indexing.
If this parameter is `true`, avoid using this filter in an index analyzer or use the [`flatten_graph`](/reference/text-analysis/analysis-flatten-graph-tokenfilter.md) filter after this filter to make the token stream suitable for indexing.
::::
`protected_words`
: (Optional, array of strings) Array of tokens the filter won’t split.
`protected_words_path`
: (Optional, string) Path to a file that contains a list of tokens the filter won’t split.
This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.
`split_on_case_change`
: (Optional, Boolean) If `true`, the filter splits tokens at letter case transitions. For example: `camelCase` → [ `camel`, `Case` ]. Defaults to `true`.
`split_on_numerics`
: (Optional, Boolean) If `true`, the filter splits tokens at letter-number transitions. For example: `j2se` → [ `j`, `2`, `se` ]. Defaults to `true`.
`stem_english_possessive`
: (Optional, Boolean) If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: `O'Neil's` → [ `O`, `Neil` ]. Defaults to `true`.
`type_table`
: (Optional, array of strings) Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.
For example, the following array maps the plus (`+`) and hyphen (`-`) characters as alphanumeric, which means they won’t be treated as delimiters:
`[ "+ => ALPHA", "- => ALPHA" ]`
Supported types include:
* `ALPHA` (Alphabetical)
* `ALPHANUM` (Alphanumeric)
* `DIGIT` (Numeric)
* `LOWER` (Lowercase alphabetical)
* `SUBWORD_DELIM` (Non-alphanumeric delimiter)
* `UPPER` (Uppercase alphabetical)
`type_table_path`
: (Optional, string) Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.
For example, the contents of this file may contain the following:
```txt
# Map the $, %, '.', and ',' characters to DIGIT
# This might be useful for financial data.
$ => DIGIT
% => DIGIT
. => DIGIT
\\u002C => DIGIT
# in some cases you might not want to split on ZWJ
# this also tests the case where we need a bigger byte[]
# see https://en.wikipedia.org/wiki/Zero-width_joiner
\\u200D => ALPHANUM
```
Supported types include:
* `ALPHA` (Alphabetical)
* `ALPHANUM` (Alphanumeric)
* `DIGIT` (Numeric)
* `LOWER` (Lowercase alphabetical)
* `SUBWORD_DELIM` (Non-alphanumeric delimiter)
* `UPPER` (Uppercase alphabetical)
This file path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each mapping in the file must be separated by a line break.
## Customize [analysis-word-delimiter-graph-tokenfilter-customize]
To customize the `word_delimiter_graph` filter, duplicate it to create the basis for a new custom token filter. You can modify the filter using its configurable parameters.
For example, the following request creates a `word_delimiter_graph` filter that uses the following rules:
* Split tokens at non-alphanumeric characters, *except* the hyphen (`-`) character.
* Remove leading or trailing delimiters from each token.
* Do *not* split tokens at letter case transitions.
* Do *not* split tokens at letter-number transitions.
* Remove the English possessive (`'s`) from the end of each token.
```console
PUT /my-index-000001
{
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "keyword",
"filter": [ "my_custom_word_delimiter_graph_filter" ]
}
},
"filter": {
"my_custom_word_delimiter_graph_filter": {
"type": "word_delimiter_graph",
"type_table": [ "- => ALPHA" ],
"split_on_case_change": false,
"split_on_numerics": false,
"stem_english_possessive": true
}
}
}
}
}
```
## Differences between `word_delimiter_graph` and `word_delimiter` [analysis-word-delimiter-graph-differences]
Both the `word_delimiter_graph` and [`word_delimiter`](/reference/text-analysis/analysis-word-delimiter-tokenfilter.md) filters produce tokens that span multiple positions when any of the following parameters are `true`:
* [`catenate_all`](#word-delimiter-graph-tokenfilter-catenate-all)
* [`catenate_numbers`](#word-delimiter-graph-tokenfilter-catenate-numbers)
* [`catenate_words`](#word-delimiter-graph-tokenfilter-catenate-words)
* [`preserve_original`](#word-delimiter-graph-tokenfilter-preserve-original)
However, only the `word_delimiter_graph` filter assigns multi-position tokens a `positionLength` attribute, which indicates the number of positions a token spans. This ensures the `word_delimiter_graph` filter always produces valid [token graphs](docs-content://manage-data/data-store/text-analysis/token-graphs.md).
The `word_delimiter` filter does not assign multi-position tokens a `positionLength` attribute. This means it produces invalid graphs for streams including these tokens.
While indexing does not support token graphs containing multi-position tokens, queries, such as the [`match_phrase`](/reference/query-languages/query-dsl/query-dsl-match-query-phrase.md) query, can use these graphs to generate multiple sub-queries from a single query string.
To see how token graphs produced by the `word_delimiter` and `word_delimiter_graph` filters differ, check out the following example.
:::::{dropdown} Example
$$$analysis-word-delimiter-graph-basic-token-graph$$$
**Basic token graph**
Both the `word_delimiter` and `word_delimiter_graph` produce the following token graph for `PowerShot2000` when the following parameters are `false`:
* [`catenate_all`](#word-delimiter-graph-tokenfilter-catenate-all)
* [`catenate_numbers`](#word-delimiter-graph-tokenfilter-catenate-numbers)
* [`catenate_words`](#word-delimiter-graph-tokenfilter-catenate-words)
* [`preserve_original`](#word-delimiter-graph-tokenfilter-preserve-original)
This graph does not contain multi-position tokens. All tokens span only one position.
:::{image} images/token-graph-basic.svg
:alt: token graph basic
:::
$$$analysis-word-delimiter-graph-wdg-token-graph$$$
**`word_delimiter_graph` graph with a multi-position token**
The `word_delimiter_graph` filter produces the following token graph for `PowerShot2000` when `catenate_words` is `true`.
This graph correctly indicates the catenated `PowerShot` token spans two positions.
:::{image} images/token-graph-wdg.svg
:alt: token graph wdg
:::
$$$analysis-word-delimiter-graph-wd-token-graph$$$
**`word_delimiter` graph with a multi-position token**
When `catenate_words` is `true`, the `word_delimiter` filter produces the following token graph for `PowerShot2000`.
Note that the catenated `PowerShot` token should span two positions but only spans one in the token graph, making it invalid.
:::{image} images/token-graph-wd.svg
:alt: token graph wd
:::
::::: | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/text-analysis/analysis-word-delimiter-graph-tokenfilter.md |
## Input
```javascript
import {useHook} from 'shared-runtime';
function Component(props) {
const data = useHook();
const items = [];
// NOTE: `item` is a context variable because it's reassigned and also referenced
// within a closure, the `onClick` handler of each item
for (let key in props.data) {
key = key ?? null; // no-op reassignment to force a context variable
items.push(
<div key={key} onClick={() => data.set(key)}>
{key}
</div>
);
}
return <div>{items}</div>;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{data: {a: 'a', b: true, c: 'hello'}}],
};
```
## Error
```
Found 1 error:
Todo: Support non-trivial for..in inits
error.todo-for-in-loop-with-context-variable-iterator.ts:8:2
6 | // NOTE: `item` is a context variable because it's reassigned and also referenced
7 | // within a closure, the `onClick` handler of each item
> 8 | for (let key in props.data) {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 9 | key = key ?? null; // no-op reassignment to force a context variable
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 10 | items.push(
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 11 | <div key={key} onClick={() => data.set(key)}>
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 12 | {key}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 13 | </div>
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 14 | );
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 15 | }
| ^^^^ Support non-trivial for..in inits
16 | return <div>{items}</div>;
17 | }
18 |
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.todo-for-in-loop-with-context-variable-iterator.expect.md |
# -*- coding: utf-8 -*-
"""
Find the named color closest to a query color using CIE2000 distance.

For a massive matrix of colors and color labels you can download
the follow two files
# http://lyst-classifiers.s3.amazonaws.com/color/lab-colors.pk
# http://lyst-classifiers.s3.amazonaws.com/color/lab-matrix.pk
lab-colors is a cPickled list of color names and lab-matrix is a
cPickled (n,3) numpy array LAB values such that row q maps to
index q in the lab color list
"""
import sys
import csv
import bz2
import numpy as np
# Does some sys.path manipulation so we can run examples in-place.
# noinspection PyUnresolvedReferences
import example_config # noqa
from colormath.color_diff_matrix import delta_e_cie2000
from colormath.color_objects import LabColor
# load list of 1000 random colors from the XKCD color chart
if sys.version_info >= (3, 0):
    # Python 3: open the bz2 archive in text mode; map() is lazy, so wrap
    # it in list() before handing rows to numpy.
    reader = csv.DictReader(bz2.open("lab_matrix.csv.bz2", mode="rt"))
    lab_matrix = np.array([list(map(float, row.values())) for row in reader])
else:
    # Python 2: bz2.open() does not exist; BZ2File yields str lines and
    # map() already returns a list.
    reader = csv.DictReader(bz2.BZ2File("lab_matrix.csv.bz2"))
    lab_matrix = np.array([map(float, row.values()) for row in reader])
color = LabColor(lab_l=69.34, lab_a=-0.88, lab_b=-52.57)
# Flatten the query color into the same (L, a, b) layout as the matrix rows.
lab_color_vector = np.array([color.lab_l, color.lab_a, color.lab_b])
# Vector of CIE2000 distances from the query color to every matrix row.
delta = delta_e_cie2000(lab_color_vector, lab_matrix)
print("%s is closest to %s" % (color, lab_matrix[np.argmin(delta)]))
import subprocess
import sys
import os
def start(args, logfile, errfile):
    """Fetch Go dependencies and launch the webgo benchmark server.

    On Windows, GOPATH is set inline in the shell command and setup.bat
    does the launch; elsewhere GOPATH is exported and `go run` is spawned
    directly. Always returns 0.
    """
    if os.name == 'nt':
        subprocess.call(
            "set GOPATH=C:\\FrameworkBenchmarks\\webgo&&go get ./...",
            shell=True, cwd="webgo", stderr=errfile, stdout=logfile)
        subprocess.Popen("setup.bat", shell=True, cwd="webgo",
                         stderr=errfile, stdout=logfile)
        return 0
    os.environ["GOPATH"] = os.path.expanduser('~/FrameworkBenchmarks/webgo')
    subprocess.call("go get ./...", shell=True, cwd="webgo",
                    stderr=errfile, stdout=logfile)
    subprocess.Popen(["go", "run", "src/hello/hello.go"], cwd="webgo",
                     stderr=errfile, stdout=logfile)
    return 0
def stop(logfile, errfile):
    """Terminate the webgo benchmark server.

    Windows uses taskkill; elsewhere, every process whose `ps aux` line
    mentions 'hello' receives SIGTERM (signal 15). Always returns 0.
    """
    if os.name == 'nt':
        subprocess.call("taskkill /f /im go.exe > NUL", shell=True,
                        stderr=errfile, stdout=logfile)
        subprocess.call("taskkill /f /im hello.exe > NUL", shell=True,
                        stderr=errfile, stdout=logfile)
        return 0
    ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = ps.communicate()
    for line in out.splitlines():
        if 'hello' not in line:
            continue
        # Second whitespace-separated column of `ps aux` is the PID.
        pid = int(line.split(None, 2)[1])
        os.kill(pid, 15)
    return 0
libfpconv
----------------------------------------------
Fast and accurate double to string conversion based on Florian Loitsch's Grisu-algorithm[1].
This port contains a subset of the 'C' version of Fast and accurate double to string conversion based on Florian Loitsch's Grisu-algorithm, available at [github.com/night-shift/fpconv](https://github.com/night-shift/fpconv).
[1] https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf | unknown | github | https://github.com/redis/redis | deps/fpconv/README.md |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "org_apache_hadoop.h"
#include "jni_common.h"
#include "isal_load.h"
#include "org_apache_hadoop_io_erasurecode_ErasureCodeNative.h"
#ifdef UNIX
#include "config.h"
#endif
/*
 * JNI entry point for ErasureCodeNative.loadLibrary().
 * Delegates to loadLib() (declared in isal_load.h); loadLib() is
 * expected to load the native ISA-L library and to report failure by
 * raising a Java exception through `env` -- confirm in isal_load.c.
 */
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_erasurecode_ErasureCodeNative_loadLibrary
(JNIEnv *env, jclass myclass) {
  loadLib(env);
}
/*
 * JNI entry point for ErasureCodeNative.getLibraryName().
 * Returns the file name of the loaded ISA-L library as a Java string,
 * or throws UnsatisfiedLinkError (returning NULL) when the library has
 * not been loaded yet (isaLoader is still NULL).
 */
JNIEXPORT jstring JNICALL
Java_org_apache_hadoop_io_erasurecode_ErasureCodeNative_getLibraryName
(JNIEnv *env, jclass myclass) {
  if (isaLoader != NULL) {
    return (*env)->NewStringUTF(env, isaLoader->libname);
  }
  THROW(env, "java/lang/UnsatisfiedLinkError",
        "Unavailable: library not loaded yet");
  return (jstring)NULL;
}
"""Module stub"""
import re
from madcow.util import Module, strip_html
from madcow.util.http import getsoup
from madcow.util.text import *
from urlparse import urljoin
class Main(Module):
    """Urban Dictionary lookup plugin.

    ``urban <term> [#]`` returns the n-th definition of a term; with no
    arguments, a random definition is returned.
    """

    pattern = re.compile(r'^\s*urban(?:\s+(.+?)(?:\s+(\d+))?)?\s*$', re.I)
    help = 'urban <term> [#] - lookup word/phrase on urban dictionary'
    # NOTE(review): `error` appears unused here -- response() builds its own
    # failure text inline; presumably the Module base class consults it.
    error = u'So obscure, not even urban dictionary knows it'
    urban_url = 'http://www.urbandictionary.com/'
    urban_search = urljoin(urban_url, '/define.php')
    urban_random = urljoin(urban_url, '/random.php')
    # Definition cells carry HTML ids of the form "entry_<digits>".
    entry_re = re.compile(r'entry_\d+')
    newline_re = re.compile(r'(?:\r\n|[\r\n])')
    # Number of definitions Urban Dictionary renders per result page.
    RESULTS_PER_PAGE = 7

    def response(self, nick, args, kwargs):
        """Dispatch a matched command and format the reply for `nick`.

        `args` holds the two groups captured by `pattern`: the query term
        and an optional 1-based definition number (either may be None).
        """
        query, idx = args
        try:
            if query:
                if idx:
                    idx = int(idx)
                response = self.lookup(query, idx)
            else:
                response = self.random()
        except Exception, error:
            # Any fetch/parse failure degrades to a canned message.
            response = u"That doesn't even exist in urban dictionary, stop making stuff up."
        return u'%s: %s' % (nick, response)

    def lookup(self, query, idx=None):
        """Look up term on urban dictionary.

        `idx` is the 1-based definition number across all result pages;
        it is translated into a page number plus a 0-based offset within
        that page before fetching.
        """
        if idx is None:
            idx = 1
        orig_idx = idx
        page = int(idx / self.RESULTS_PER_PAGE)
        idx = (idx % self.RESULTS_PER_PAGE) - 1
        if idx == -1:
            # Exact multiples of RESULTS_PER_PAGE wrap to the last slot.
            idx = 6
        soup = getsoup(self.urban_search, {'term': query, 'page': page},
                referer=self.urban_url)
        return self.parse(soup, idx, page, orig_idx)

    def random(self):
        """Get a random definition"""
        soup = getsoup(self.urban_random, referer=self.urban_url)
        return self.parse(soup)

    def parse(self, soup, idx=1, current_page=1, orig_idx=1):
        """Parse page for definition and format '[n/total] word: text'."""
        # get definition
        table = soup.body.find('table', id='entries')
        word = self.render(table.find('td', 'word'))
        entries = table('td', 'text', id=self.entry_re)
        size = len(entries)
        if not size:
            raise ValueError('no results?')
        if idx >= size:
            # Clamp to the last definition available on this page.
            idx = size - 1
        entry = entries[idx]
        # torturous logic to guess number of entries: inspect pagination
        # links and assume every page except the last is completely full.
        pages = soup.body.find('div', 'pagination')
        if pages is None:
            # Single page of results: count is exact.
            total = len(entries)
        else:
            highest = 0
            for a in pages('a'):
                try:
                    page = int(a['href'].split('page=')[1])
                except IndexError:
                    # Link without a page= query component; skip it.
                    continue
                if page > highest:
                    highest = page
            if highest == current_page:
                # We are on the last page: count its entries exactly.
                total = ((highest - 1) * self.RESULTS_PER_PAGE) + len(entries)
            else:
                # Otherwise estimate assuming all pages are full.
                total = highest * self.RESULTS_PER_PAGE
        if orig_idx > total:
            orig_idx = total
        # construct page
        result = u'[%d/%d] %s: ' % (orig_idx, total, word)
        result += self.render(entry.find('div', 'definition'))
        try:
            example = self.render(entry.find('div', 'example'))
            result += u' - Example: %s' % example
        except:
            # Definitions without an example block are fine; skip silently.
            pass
        return result

    @staticmethod
    def render(node):
        """Render node to text: strip HTML tags and collapse newlines."""
        data = node.renderContents()
        if isinstance(data, str):
            data = decode(data, 'utf-8')
        return Main.newline_re.sub(' ', strip_html(data)).strip()
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j E Y"
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "j E Y H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j E"
SHORT_DATE_FORMAT = "d-m-Y"
SHORT_DATETIME_FORMAT = "d-m-Y H:i"
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    "%d.%m.%Y",  # '25.10.2006'
    "%d.%m.%y",  # '25.10.06'
    "%y-%m-%d",  # '06-10-25'
    # Month-name input formats are intentionally left disabled:
    # "%d. %B %Y", # '25. października 2006'
    # "%d. %b. %Y", # '25. paź. 2006'
]
DATETIME_INPUT_FORMATS = [
    "%d.%m.%Y %H:%M:%S",  # '25.10.2006 14:30:59'
    "%d.%m.%Y %H:%M:%S.%f",  # '25.10.2006 14:30:59.000200'
    "%d.%m.%Y %H:%M",  # '25.10.2006 14:30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = " "  # digits are grouped with a plain space
NUMBER_GROUPING = 3  # thousands grouping: three digits per group
"""HDF5 tools tests."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import tempfile
import numpy as np
import tables as tb
from nose import with_setup
from kwiklib.utils.six import itervalues
from kwiklib.dataio.kwik import *
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
DIRPATH = tempfile.mkdtemp()  # fresh temp dir shared by every test in this module
def setup_create(create_default=False):
    """Fixture: create the 'myexperiment' files in DIRPATH.

    `create_default` is forwarded as create_files' create_default_info flag.
    """
    params = {'nfeatures': 3, 'waveforms_nsamples': 20, 'has_masks': False,
              'nchannels': 3}
    probe = {
        0: {
            'channels': [4, 6, 8],
            'graph': [[4, 6], [8, 4]],
            'geometry': {4: [0.4, 0.6], 6: [0.6, 0.8], 8: [0.8, 0.0]},
        },
    }
    create_files('myexperiment', dir=DIRPATH, prm=params, prb=probe,
                 create_default_info=create_default)
def setup_create_default():
    """Fixture: like setup_create, but with the default kwik info created."""
    setup_create(create_default=True)
def teardown_create():
    """Fixture teardown: delete every file created for 'myexperiment'.

    The original used a list comprehension purely for its side effect;
    a plain for-loop states the intent.
    """
    files = get_filenames('myexperiment', dir=DIRPATH)
    for path in itervalues(files):
        os.remove(path)
# -----------------------------------------------------------------------------
# Filename tests
# -----------------------------------------------------------------------------
def test_get_filenames():
    """get_filenames must return one correctly-named path per file kind."""
    filenames = get_filenames('myexperiment')
    expected = {
        'kwik': 'myexperiment.kwik',
        'kwx': 'myexperiment.kwx',
        'raw.kwd': 'myexperiment.raw.kwd',
        'low.kwd': 'myexperiment.low.kwd',
        'high.kwd': 'myexperiment.high.kwd',
    }
    for kind, basename in expected.items():
        assert os.path.basename(filenames[kind]) == basename
def test_basename_1():
    """get_basename recovers the experiment name from each generated path."""
    bn = 'myexperiment'
    filenames = get_filenames(bn)
    for kind in ('kwik', 'kwx', 'raw.kwd'):
        assert get_basename(filenames[kind]) == bn
def test_basename_2():
    """get_basename strips the directory and every extension from a path."""
    paths = (
        '/my/path/experiment.kwik',
        '/my/path/experiment.kwx',
        '/my/path/experiment.high.kwd',
    )
    for path in paths:
        assert get_basename(path) == 'experiment'
# -----------------------------------------------------------------------------
# HDF5 creation functions tests
# -----------------------------------------------------------------------------
def test_create_kwik():
    """create_kwik must create per-channel nodes carrying a 'name' attribute."""
    path = os.path.join(DIRPATH, 'myexperiment.kwik')
    prm = {
        'waveforms_nsamples': 20,
        'nfeatures': 3*32,
    }
    prb = {0:
        {
            'channels': [4, 6, 8],
            'graph': [[4, 6], [8, 4]],
            'geometry': {4: [0.4, 0.6], 6: [0.6, 0.8], 8: [0.8, 0.0]},
        }
    }
    create_kwik(path, prm=prm, prb=prb)
    f = tb.openFile(path, 'r')
    # PyTables "natural naming" cannot resolve purely numeric node names,
    # hence the explicit __getattr__('0') / __getattr__('4') calls.
    channel = f.root.channel_groups.__getattr__('0').channels.__getattr__('4')
    assert channel._v_attrs.name == 'channel_4'
    f.close()
    os.remove(path)
def test_create_kwx():
    """create_kwx must size the features_masks and waveform arrays per group,
    honouring per-group 'nfeatures' overrides in the probe dictionary."""
    path = os.path.join(DIRPATH, 'myexperiment.kwx')
    # Create the KWX file.
    waveforms_nsamples = 20
    nchannels = 32
    nchannels2 = 24
    nfeatures = 3*nchannels
    prm = {
        'waveforms_nsamples': waveforms_nsamples,
        'nfeatures': 3*nchannels,
    }
    # Group 0 falls back to the global prm values; groups 1 and 2 override
    # 'nfeatures' locally (3 per channel for group 1, 2 per channel for 2).
    prb = {0:
        {
            'channels': np.arange(nchannels),
        },
        1: {
            'channels': nchannels + np.arange(nchannels2),
            'nfeatures': 3*nchannels2
        },
        2: {
            'channels': nchannels + nchannels2 + np.arange(nchannels),
            'nfeatures': 2*nchannels
        },
    }
    create_kwx(path, prb=prb, prm=prm)
    # Open the KWX file.
    f = tb.openFile(path, 'r')
    # Group 1
    fm1 = f.root.channel_groups.__getattr__('1').features_masks
    wr1 = f.root.channel_groups.__getattr__('1').waveforms_raw
    wf1 = f.root.channel_groups.__getattr__('1').waveforms_filtered
    # Trailing dimension 2 of features_masks holds (feature, mask) pairs.
    assert fm1.shape[1:] == (3*nchannels2, 2)
    assert wr1.shape[1:] == (waveforms_nsamples, nchannels2)
    assert wf1.shape[1:] == (waveforms_nsamples, nchannels2)
    # Group 2
    fm2 = f.root.channel_groups.__getattr__('2').features_masks
    wr2 = f.root.channel_groups.__getattr__('2').waveforms_raw
    wf2 = f.root.channel_groups.__getattr__('2').waveforms_filtered
    assert fm2.shape[1:] == (2*nchannels, 2)
    assert wr2.shape[1:] == (waveforms_nsamples, nchannels)
    assert wf2.shape[1:] == (waveforms_nsamples, nchannels)
    f.close()
    # Delete the file.
    os.remove(path)
def test_create_kwd():
    """Creating a raw KWD file must produce a /recordings group."""
    path = os.path.join(DIRPATH, 'myexperiment.raw.kwd')
    # Create the KWD file.
    nchannels_total = 32 * 3
    create_kwd(path, type='raw', prm={'nchannels': nchannels_total})
    # Open it and check the recordings group exists.
    kwd = tb.openFile(path, 'r')
    assert kwd.root.recordings
    kwd.close()
    # Delete the file.
    os.remove(path)
def test_create_empty():
    """create_files with no prm/prb must still create the whole file set."""
    files = create_files('myexperiment', dir=DIRPATH)
    # Clean up with a plain loop instead of the original side-effect-only
    # list comprehension.
    for path in itervalues(files):
        os.remove(path)
@with_setup(setup_create_default, teardown_create)
def test_create_default():
    """With default info created, the kwik file must contain the 'Noise'
    cluster group and a default cluster node under channel group 0.

    The original body also built unused `path`, `prm` and `prb` locals
    (copy-pasted from test_create_kwik); they have been removed.
    """
    files = open_files('myexperiment', dir=DIRPATH)
    f = files['kwik']
    # Numeric node names require explicit __getattr__ (PyTables natural
    # naming cannot resolve them).
    assert f.root.channel_groups.__getattr__('0').cluster_groups.main.__getattr__('0')._f_getAttr('name') == 'Noise'
    assert hasattr(f.root.channel_groups.__getattr__('0').clusters.main, '0')
    close_files(files)
# -----------------------------------------------------------------------------
# Item creation functions tests
# -----------------------------------------------------------------------------
@with_setup(setup_create, teardown_create)
def test_add_recording():
    """add_recording must store every metadata value as an HDF5 attribute
    on the newly created recording node."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    # Metadata that must round-trip into the node's attributes.
    meta = {
        'sample_rate': 20000.,
        'start_time': 10.,
        'start_sample': 200000.,
        'bit_depth': 16,
        'band_high': 100.,
        'band_low': 500.,
    }
    add_recording(files,
                  nchannels=32,
                  nsamples=0,
                  **meta)
    rec = files['kwik'].root.recordings.__getattr__('0')
    for name, value in meta.items():
        assert getattr(rec._v_attrs, name) == value
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_event_type():
    """Add an event type and verify the expected extendable arrays exist."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    add_event_type(files, 'myevents')
    my_events = files['kwik'].root.event_types.myevents.events
    # The new event type must expose EArrays for spike times and recordings.
    assert isinstance(my_events.time_samples, tb.EArray)
    assert isinstance(my_events.recording, tb.EArray)
    # Accessing user_data must not raise.
    my_events.user_data
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_cluster_group():
    """Add a cluster group, check its attributes, then remove it again."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    add_cluster_group(files, channel_group_id='0', id='0', name='Noise')
    # The new group lives under channel_groups/0/cluster_groups/main/0.
    main_groups = files['kwik'].root.channel_groups.__getattr__('0').cluster_groups.main
    noise = main_groups.__getattr__('0')
    assert noise._v_attrs.name == 'Noise'
    # These accesses must not raise.
    noise.application_data.klustaviewa._v_attrs.color
    noise.user_data
    # After removal the child node '0' must be gone.
    remove_cluster_group(files, channel_group_id='0', id='0')
    assert not hasattr(main_groups, '0')
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_cluster():
    """Add a cluster, touch all its metadata fields, then remove it."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    add_cluster(files, channel_group_id='0',)
    main_clusters = files['kwik'].root.channel_groups.__getattr__('0').clusters.main
    cluster = main_clusters.__getattr__('0')
    # All metadata attributes and child nodes must be accessible without raising.
    cluster._v_attrs.cluster_group
    cluster._v_attrs.mean_waveform_raw
    cluster._v_attrs.mean_waveform_filtered
    cluster.quality_measures
    cluster.application_data.klustaviewa._v_attrs.color
    cluster.user_data
    # Removing the cluster must delete the child node '0'.
    remove_cluster(files, channel_group_id='0', id='0')
    assert not hasattr(main_clusters, '0')
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_clustering():
    """Add spikes plus a named clustering and check the stored assignments.

    Verifies that:
      * the per-spike cluster assignment array round-trips unchanged,
      * no cluster node named '0' is created for this clustering,
      * every cluster id present in the assignments gets a node with a
        positive KlustaViewa color attribute.
    """
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    nspikes = 100
    # Spikes must exist before a clustering can refer to them.
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(nspikes),
               features=np.random.randn(nspikes, 3),
               fill_empty=False,
               )
    # Random cluster ids in [3, 20) so several distinct clusters are exercised.
    spike_clusters = np.random.randint(size=nspikes, low=3, high=20)
    add_clustering(files, name='myclustering', spike_clusters=spike_clusters)
    clusters = files['kwik'].root.channel_groups.__getattr__('0').spikes.clusters.myclustering[:]
    assert np.allclose(spike_clusters, clusters)
    clustering = files['kwik'].root.channel_groups.__getattr__('0').clusters.myclustering
    assert not hasattr(clustering, '0')
    for i in np.unique(spike_clusters):
        # Each cluster node carries a positive KlustaViewa color attribute.
        assert clustering.__getattr__(str(i)).application_data. \
            klustaviewa._f_getAttr('color') > 0
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_clustering_overwrite():
    """Like test_add_clustering, but targets the existing 'main' clustering.

    Exercises the overwrite=True code path of add_clustering (presumably a
    'main' clustering already exists from the fixture — confirm against
    setup_create).
    """
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    nspikes = 100
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(nspikes),
               features=np.random.randn(nspikes, 3),
               fill_empty=False,
               )
    spike_clusters = np.random.randint(size=nspikes, low=3, high=20)
    # overwrite=True replaces the 'main' clustering in place.
    add_clustering(files, name='main', spike_clusters=spike_clusters,
                   overwrite=True)
    clusters = files['kwik'].root.channel_groups.__getattr__('0').spikes.clusters.main[:]
    assert np.allclose(spike_clusters, clusters)
    clustering = files['kwik'].root.channel_groups.__getattr__('0').clusters.main
    # No leftover cluster '0' after the overwrite.
    assert not hasattr(clustering, '0')
    for i in np.unique(spike_clusters):
        # Each cluster node carries a positive KlustaViewa color attribute.
        assert clustering.__getattr__(str(i)).application_data. \
            klustaviewa._f_getAttr('color') > 0
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_spikes():
    """Add spikes incrementally in several ways and check the final shapes.

    Five add_spikes calls add 1 + 1 + 2 + 2 + 1 = 7 spikes, using scalar and
    array time samples, with and without masks/waveforms; the waveform
    arrays must end up with 7 rows each.
    """
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    nspikes = 7
    # Scalar time sample, nothing else.
    add_spikes(files, channel_group_id='0',
               time_samples=1,
               )
    # One-element array of time samples.
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(1),
               )
    # Two spikes with masks only.
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(2),
               masks=np.random.randn(2, 3),
               )
    # Two spikes with raw waveforms only.
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(2),
               waveforms_raw=np.random.randn(2, 20, 3),
               )
    # One spike with both raw and filtered waveforms.
    add_spikes(files, channel_group_id='0',
               time_samples=4,
               waveforms_raw=np.random.randn(20, 3),
               waveforms_filtered=np.random.randn(20, 3),
               )
    spikes = files['kwx'].root.channel_groups.__getattr__('0')
    assert spikes.waveforms_raw.shape == (nspikes, 20, 3)
    assert spikes.waveforms_filtered.shape == (nspikes, 20, 3)
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_add_spikes_fm():
    """Add feature-only spikes and check the waveform arrays stay empty."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    n = 7
    # fill_empty=False: no padding of missing waveforms, so the waveform
    # arrays must stay empty while features_masks grows to n rows.
    add_spikes(files, channel_group_id='0',
               time_samples=np.arange(n),
               features=np.random.randn(n, 3),
               fill_empty=False,
               )
    chgroup = files['kwx'].root.channel_groups.__getattr__('0')
    assert chgroup.waveforms_raw.shape == (0, 20, 3)
    assert chgroup.waveforms_filtered.shape == (0, 20, 3)
    assert chgroup.features_masks.shape == (n, 3)
    close_files(files)
@with_setup(setup_create, teardown_create)
def test_to_contiguous():
    """Convert an EArray to contiguous Array."""
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    n = 100000
    fm = files['kwx'].root.channel_groups.__getattr__('0').features_masks
    # Remember row shape and atom so we can check they survive the conversion.
    s = fm.shape[1:]
    a = fm.atom
    X = np.random.rand(n, *s)
    fm.append(X)
    # Before conversion: an extendable EArray with n rows.
    assert isinstance(fm, tb.EArray)
    assert fm.shape[0] == n
    assert fm.shape[1:] == s
    assert fm.atom == a
    to_contiguous(fm, nspikes=n)
    # Re-fetch the node: it must now be a plain (non-extendable) Array with
    # identical shape, atom, and contents.
    fm = files['kwx'].root.channel_groups.__getattr__('0').features_masks
    assert isinstance(fm, tb.Array) and not isinstance(fm, tb.EArray)
    assert fm.shape[0] == n
    assert fm.shape[1:] == s
    assert fm.atom == a
    Y = fm[...]
    assert np.allclose(X, Y)
    close_files(files)
//// [tests/cases/compiler/arrayTypeInSignatureOfInterfaceAndClass.ts] ////

// NOTE: generated TypeScript compiler baseline — the sections below mirror
// the input test file and the JavaScript it emits.

//// [arrayTypeInSignatureOfInterfaceAndClass.ts]
declare namespace WinJS {
    class Promise<T> {
        then<U>(success?: (value: T) => Promise<U>, error?: (error: any) => Promise<U>, progress?: (progress: any) => void): Promise<U>;
    }
}
declare namespace Data {
    export interface IListItem<T> {
        itemIndex: number;
        key: any;
        data: T;
        group: any;
        isHeader: boolean;
        cached: boolean;
        isNonSourceData: boolean;
        preventAugmentation: boolean;
    }
    export interface IVirtualList<T> {
        //removeIndices: WinJS.Promise<IListItem<T>[]>;
        removeIndices(indices: number[], options?: any): WinJS.Promise<IListItem<T>[]>;
    }
    export class VirtualList<T> implements IVirtualList<T> {
        //removeIndices: WinJS.Promise<IListItem<T>[]>;
        public removeIndices(indices: number[], options?: any): WinJS.Promise<IListItem<T>[]>;
    }
}

//// [arrayTypeInSignatureOfInterfaceAndClass.js]
"use strict";
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import subprocess
import sys
import tempfile
import re
import logging
import unittest
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to plain ASCII glyphs on terminals that cannot encode them.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "

# ANSI colors are only enabled on non-Windows, or on Windows 10 build 14393+
# where virtual-terminal processing can be switched on.
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
    if os.name == 'nt':
        import ctypes
        kernel32 = ctypes.windll.kernel32  # type: ignore
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # Enable ascii color control to stdout
        stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        stdout_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
        kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ascii color control to stderr
        stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
        stderr_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
        kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
# Exit code test scripts use to signal they were skipped (see run_tests()).
TEST_EXIT_SKIPPED = 77

# Framework modules whose embedded unit tests are run before the functional tests.
TEST_FRAMEWORK_MODULES = [
    "address",
    "blocktools",
    "muhash",
    "key",
    "script",
    "segwit_addr",
    "util",
]

EXTENDED_SCRIPTS = [
    # These tests are not run by default.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    'feature_dbcrash.py',
]
BASE_SCRIPTS = [
    # Scripts that are run by default.
    # Longest test should go first, to favor running tests in parallel
    'wallet_hd.py --legacy-wallet',
    'wallet_hd.py --descriptors',
    'wallet_backup.py --legacy-wallet',
    'wallet_backup.py --descriptors',
    # vv Tests less than 5m vv
    'mining_getblocktemplate_longpoll.py',
    'feature_maxuploadtarget.py',
    'feature_block.py',
    'rpc_fundrawtransaction.py --legacy-wallet',
    'rpc_fundrawtransaction.py --descriptors',
    'p2p_compactblocks.py',
    'feature_segwit.py --legacy-wallet',
    # vv Tests less than 2m vv
    'wallet_basic.py --legacy-wallet',
    'wallet_basic.py --descriptors',
    'wallet_labels.py --legacy-wallet',
    'wallet_labels.py --descriptors',
    'p2p_segwit.py',
    'p2p_timeouts.py',
    'p2p_tx_download.py',
    'mempool_updatefromblock.py',
    'wallet_dump.py --legacy-wallet',
    'wallet_listtransactions.py --legacy-wallet',
    'wallet_listtransactions.py --descriptors',
    'feature_taproot.py',
    'rpc_signer.py',
    'wallet_signer.py --descriptors',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_importmulti.py --legacy-wallet',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py --legacy-wallet',
    'wallet_listreceivedby.py --descriptors',
    'wallet_abandonconflict.py --legacy-wallet',
    'wallet_abandonconflict.py --descriptors',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py --legacy-wallet',
    'rpc_rawtransaction.py --descriptors',
    'wallet_address_types.py --legacy-wallet',
    'wallet_address_types.py --descriptors',
    'feature_bip68_sequence.py',
    'p2p_feefilter.py',
    'feature_reindex.py',
    'feature_abortnode.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py --legacy-wallet',
    'wallet_keypool_topup.py --descriptors',
    'feature_fee_estimation.py',
    'interface_zmq.py',
    'rpc_invalid_address_message.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'tool_wallet.py --legacy-wallet',
    'tool_wallet.py --descriptors',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'rpc_misc.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'wallet_avoidreuse.py --legacy-wallet',
    'wallet_avoidreuse.py --descriptors',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py --legacy-wallet',
    'wallet_multiwallet.py --descriptors',
    'wallet_multiwallet.py --usecli',
    'wallet_createwallet.py --legacy-wallet',
    'wallet_createwallet.py --usecli',
    'wallet_createwallet.py --descriptors',
    'wallet_watchonly.py --legacy-wallet',
    'wallet_watchonly.py --usecli --legacy-wallet',
    'wallet_reorgsrestore.py',
    'interface_http.py',
    'interface_rpc.py',
    'rpc_psbt.py --legacy-wallet',
    'rpc_psbt.py --descriptors',
    'rpc_users.py',
    'rpc_whitelist.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py --legacy-wallet',
    'rpc_signrawtransaction.py --descriptors',
    'wallet_groups.py --legacy-wallet',
    'p2p_addrv2_relay.py',
    'wallet_groups.py --descriptors',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py --legacy-wallet',
    'wallet_disable.py --descriptors',
    'p2p_addr_relay.py',
    'p2p_getaddr_caching.py',
    'p2p_getdata.py',
    'rpc_net.py',
    'wallet_keypool.py --legacy-wallet',
    'wallet_keypool.py --descriptors',
    'wallet_descriptor.py --descriptors',
    'p2p_nobloomfilter_messages.py',
    'p2p_filter.py',
    'rpc_setban.py',
    'p2p_blocksonly.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_locator.py',
    'p2p_invalid_block.py',
    'p2p_invalid_messages.py',
    'p2p_invalid_tx.py',
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py --legacy-wallet',
    'wallet_txn_doublespend.py --descriptors',
    'feature_backwards_compatibility.py --legacy-wallet',
    'feature_backwards_compatibility.py --descriptors',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_getblockfilter.py',
    'rpc_invalidateblock.py',
    'feature_utxo_set_hash.py',
    'feature_rbf.py',
    'mempool_packages.py',
    'mempool_package_onemore.py',
    'rpc_createmultisig.py --legacy-wallet',
    'rpc_createmultisig.py --descriptors',
    'rpc_packages.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py --legacy-wallet',
    'wallet_importprunedfunds.py --descriptors',
    'p2p_leak_tx.py',
    'p2p_eviction.py',
    'rpc_signmessage.py',
    'rpc_generateblock.py',
    'rpc_generate.py',
    'wallet_balance.py --legacy-wallet',
    'wallet_balance.py --descriptors',
    'feature_nulldummy.py --legacy-wallet',
    'feature_nulldummy.py --descriptors',
    'mempool_accept.py',
    'mempool_expiry.py',
    'wallet_import_rescan.py --legacy-wallet',
    'wallet_import_with_label.py --legacy-wallet',
    'wallet_importdescriptors.py --descriptors',
    'wallet_upgradewallet.py --legacy-wallet',
    'rpc_bind.py --ipv4',
    'rpc_bind.py --ipv6',
    'rpc_bind.py --nonloopback',
    'mining_basic.py',
    'feature_signet.py',
    'wallet_bumpfee.py --legacy-wallet',
    'wallet_bumpfee.py --descriptors',
    'wallet_implicitsegwit.py --legacy-wallet',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py --legacy-wallet',
    'wallet_listsinceblock.py --descriptors',
    'wallet_listdescriptors.py --descriptors',
    'p2p_leak.py',
    'wallet_encryption.py --legacy-wallet',
    'wallet_encryption.py --descriptors',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py --legacy-wallet',
    'wallet_resendwallettransactions.py --descriptors',
    'wallet_fallbackfee.py --legacy-wallet',
    'wallet_fallbackfee.py --descriptors',
    'rpc_dumptxoutset.py',
    'feature_minchainwork.py',
    'rpc_estimatefee.py',
    'rpc_getblockstats.py',
    'wallet_create_tx.py --legacy-wallet',
    'wallet_send.py --legacy-wallet',
    'wallet_send.py --descriptors',
    'wallet_create_tx.py --descriptors',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'wallet_coinbase_category.py --legacy-wallet',
    'wallet_coinbase_category.py --descriptors',
    'feature_filelock.py',
    'feature_loadblock.py',
    'p2p_dos_header_tree.py',
    'p2p_add_connections.py',
    'p2p_unrequested_blocks.py',
    'p2p_blockfilters.py',
    'p2p_message_capture.py',
    'feature_includeconf.py',
    'feature_asmap.py',
    'mempool_unbroadcast.py',
    'mempool_compatibility.py',
    'rpc_deriveaddresses.py',
    'rpc_deriveaddresses.py --usecli',
    'p2p_ping.py',
    'rpc_scantxoutset.py',
    'feature_logging.py',
    'feature_anchors.py',
    'feature_coinstatsindex.py',
    'p2p_node_network_limited.py',
    'p2p_permissions.py',
    'feature_blocksdir.py',
    'wallet_startup.py',
    'feature_config_args.py',
    'feature_settings.py',
    'rpc_getdescriptorinfo.py',
    'rpc_addresses_deprecation.py',
    'rpc_help.py',
    'feature_help.py',
    'feature_shutdown.py',
    'p2p_ibd_txrelay.py',
    # Trailing comma required: without it, appending a new entry below would
    # silently merge with this string via implicit concatenation.
    'feature_blockfilterindex_prune.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]
# Place EXTENDED_SCRIPTS first since it has the longest running tests
# (note: it currently holds two entries, not three).
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse command-line options, assemble the test list, and run the tests."""
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--ansi', action='store_true', default=sys.stdout.isatty(), help="Use ANSI colors and dots in output (enabled by default when standard output is a TTY)")
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    parser.add_argument('--filter', help='filter scripts to run by regular expression')
    args, unknown_args = parser.parse_known_args()
    if not args.ansi:
        # Strip all colors/formatting when not writing to a terminal.
        global BOLD, GREEN, RED, GREY
        BOLD = ("", "")
        GREEN = ("", "")
        RED = ("", "")
        GREY = ("", "")

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/test_runner_₿_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept names with or without a .py extension.
        # Specified tests can contain wildcards, but in that case the supplied
        # paths should be coherent, e.g. the same path as that provided to call
        # test_runner.py. Examples:
        #   `test/functional/test_runner.py test/functional/wallet*`
        #   `test/functional/test_runner.py ./test/functional/wallet*`
        #   `test_runner.py wallet*`
        # but not:
        #   `test/functional/test_runner.py wallet*`
        # Multiple wildcards can be passed:
        #   `test_runner.py tool* mempool*`
        for test in tests:
            script = test.split("/")[-1]
            script = script + ".py" if ".py" not in script else script
            if script in ALL_SCRIPTS:
                test_list.append(script)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            # Remove <test_name>.py and <test_name>.py --arg from the test list
            exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
            for exclude_item in exclude_list:
                test_list.remove(exclude_item)
            if not exclude_list:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if args.filter:
        test_list = list(filter(re.compile(args.filter).search, test_list))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
        use_term_control=args.ansi,
    )
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control):
    """Run the given test scripts in parallel and report the results.

    Args:
        test_list: test script names (possibly with per-script flags).
        src_dir: source tree root (test scripts live under test/functional/).
        build_dir: build tree root (holds the test cache directory).
        tmpdir: base directory for per-test data directories.
        jobs: number of test scripts run concurrently.
        enable_coverage: collect RPC coverage via RPCCoverage.
        args: extra flags forwarded to every test script.
        combined_logs_len: if non-zero, print that many lines of the combined
            node/framework logs for each failing test.
        failfast: stop after the first test failure.
        use_term_control: emit progress dots and line-clearing control codes.

    Exits the process with a non-zero status if any test (or RPC coverage)
    failed.
    """
    args = args or []

    # Warn if bitcoind is already running
    try:
        # pgrep exits with code zero when one or more matching processes found
        if subprocess.run(["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL).returncode == 0:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except OSError:
        # pgrep not supported
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    # Test Framework Tests: run the framework's own unit tests first and
    # bail out early if they fail.
    print("Running Unit Tests for Test Framework Modules")
    test_framework_tests = unittest.TestSuite()
    for module in TEST_FRAMEWORK_MODULES:
        test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module)))
    result = unittest.TextTestRunner(verbosity=1, failfast=True).run(test_framework_tests)
    if not result.wasSuccessful():
        logging.debug("Early exiting after failure in TestFramework unit tests")
        sys.exit(False)

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        use_term_control=use_term_control,
    )
    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for i in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            # Failed: dump the test's stdout/stderr, and optionally the
            # combined node logs.
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage_passed = coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    else:
        coverage_passed = True

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) and coverage_passed

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Pretty-print a summary table of all test results to stdout."""
    header = "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION")
    parts = ["\n" + BOLD[1] + header + BOLD[0]]
    # Order rows via TestResult.sort_key before rendering.
    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    time_sum = 0
    for result in test_results:
        all_passed = all_passed and result.was_successful
        time_sum += result.time
        result.padding = max_len_name
        parts.append(str(result))
    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        parts.append(RED[1])
    summary = "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum)
    parts.append(BOLD[1] + summary + BOLD[0])
    if not all_passed:
        parts.append(RED[0])
    parts.append("Runtime: %s s\n" % runtime)
    print("".join(parts))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """

    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, use_term_control):
        assert num_tests_parallel >= 1
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # Each job is a (name, start_time, Popen, testdir, stdout, stderr) tuple.
        self.jobs = []
        self.use_term_control = use_term_control

    def get_next(self):
        """Start tests up to the parallelism limit and block until one finishes.

        Returns a (TestResult, testdir, stdout, stderr) tuple for the first
        job that completes.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            # The remaining list length serves as a unique port seed per test.
            portseed = len(self.test_list)
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')

        # Print remaining running jobs when all jobs have been started.
        if not self.test_list:
            print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))

        dot_count = 0
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any stderr output counts as a failure even with exit code 0.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    if self.use_term_control:
                        # Erase the progress dots printed while waiting.
                        clearline = '\r' + (' ' * dot_count) + '\r'
                        print(clearline, end='', flush=True)
                    dot_count = 0
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            if self.use_term_control:
                print('.', end='', flush=True)
            dot_count += 1

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()
class TestResult():
    """Outcome of one test script: name, status string, and duration."""

    def __init__(self, name, status, time):
        self.name = name
        self.status = status  # "Passed", "Skipped" or "Failed"
        self.time = time
        self.padding = 0  # column width, set by print_results before rendering

    def sort_key(self):
        """Order results as passed < skipped < failed, then by name."""
        if self.status == "Failed":
            return 2, self.name.lower()
        if self.status == "Skipped":
            return 1, self.name.lower()
        if self.status == "Passed":
            return 0, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color, glyph = GREEN, TICK
        elif self.status == "Failed":
            color, glyph = RED, CROSS
        elif self.status == "Skipped":
            color, glyph = GREY, CIRCLE
        row = "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time)
        return color[1] + row + color[0]

    @property
    def was_successful(self):
        # Skipped tests still count as successful.
        return self.status != "Failed"
def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    allowed = re.compile("^(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
    offenders = [name for name in ALL_SCRIPTS if allowed.match(name) is None]
    if not offenders:
        return
    print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(offenders)))
    print(" %s" % ("\n ".join(sorted(offenders))))
    raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which are
    not being run by pull-tester.py.
    """
    script_dir = src_dir + '/test/functional/'
    on_disk = {name for name in os.listdir(script_dir) if name.endswith(".py")}
    known = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(on_disk - known)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if fail_on_warn:
            # On CI this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `bitcoin-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        # Each test process writes its coverage files into this directory,
        # which is handed to the scripts via the --coveragedir flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.

        Returns True when every RPC command was covered.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
            return True
        print("Uncovered RPC commands:")
        print("".join((" - %s\n" % command) for command in sorted(uncovered)))
        return False

    def cleanup(self):
        """Delete the temporary coverage directory."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test_framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")
        with open(reference_path, 'r', encoding="utf8") as ref_file:
            all_cmds = {line.strip() for line in ref_file.readlines()}
        # Consider RPC generate covered, because it is overloaded in
        # test_framework/test_node.py and not seen by the coverage check.
        covered_cmds = set({'generate'})
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    with open(os.path.join(root, filename), 'r', encoding="utf8") as cov_file:
                        covered_cmds.update(line.strip() for line in cov_file.readlines())
        return all_cmds - covered_cmds
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Create a custom architecture
[`AutoClass`](model_doc/auto)は、モデルのアーキテクチャを自動的に推論し、事前学習済みの設定と重みをダウンロードします。一般的には、チェックポイントに依存しないコードを生成するために`AutoClass`を使用することをお勧めします。ただし、特定のモデルパラメータに対する制御をより詳細に行いたいユーザーは、いくつかの基本クラスからカスタム🤗 Transformersモデルを作成できます。これは、🤗 Transformersモデルを研究、トレーニング、または実験する興味があるユーザーに特に役立つかもしれません。このガイドでは、`AutoClass`を使用しないカスタムモデルの作成について詳しく説明します。次の方法を学びます:
- モデルの設定をロードおよびカスタマイズする。
- モデルアーキテクチャを作成する。
- テキスト用の遅いトークナイザと高速トークナイザを作成する。
- ビジョンタスク用の画像プロセッサを作成する。
- オーディオタスク用の特徴抽出器を作成する。
- マルチモーダルタスク用のプロセッサを作成する。
## Configuration
[設定](main_classes/configuration)は、モデルの特定の属性を指します。各モデルの設定には異なる属性があります。たとえば、すべてのNLPモデルには、`hidden_size`、`num_attention_heads`、`num_hidden_layers`、および`vocab_size`属性が共通してあります。これらの属性は、モデルを構築するための注意ヘッドの数や隠れ層の数を指定します。
[DistilBERT](model_doc/distilbert)をより詳しく調べるために、[`DistilBertConfig`]にアクセスしてその属性を調べてみましょう:
```py
>>> from transformers import DistilBertConfig
>>> config = DistilBertConfig()
>>> print(config)
DistilBertConfig {
"activation": "gelu",
"attention_dropout": 0.1,
"dim": 768,
"dropout": 0.1,
"hidden_dim": 3072,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"model_type": "distilbert",
"n_heads": 12,
"n_layers": 6,
"pad_token_id": 0,
"qa_dropout": 0.1,
"seq_classif_dropout": 0.2,
"sinusoidal_pos_embds": false,
"transformers_version": "4.16.2",
"vocab_size": 30522
}
```
[`DistilBertConfig`]は、基本の[`DistilBertModel`]を構築するために使用されるすべてのデフォルト属性を表示します。
すべての属性はカスタマイズ可能で、実験のためのスペースを提供します。例えば、デフォルトのモデルをカスタマイズして以下のようなことができます:
- `activation`パラメータで異なる活性化関数を試す。
- `attention_dropout`パラメータで注意確率の高いドロップアウト率を使用する。
```py
>>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4)
>>> print(my_config)
DistilBertConfig {
"activation": "relu",
"attention_dropout": 0.4,
"dim": 768,
"dropout": 0.1,
"hidden_dim": 3072,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"model_type": "distilbert",
"n_heads": 12,
"n_layers": 6,
"pad_token_id": 0,
"qa_dropout": 0.1,
"seq_classif_dropout": 0.2,
"sinusoidal_pos_embds": false,
"transformers_version": "4.16.2",
"vocab_size": 30522
}
```
事前学習済みモデルの属性は、[`~PretrainedConfig.from_pretrained`] 関数で変更できます:
```py
>>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4)
```
モデル構成に満足したら、[`PretrainedConfig.save_pretrained`]で保存できます。構成ファイルはJSONファイルとして指定の保存ディレクトリに格納されます。
```py
>>> my_config.save_pretrained(save_directory="./your_model_save_path")
```
設定ファイルを再利用するには、[`~PretrainedConfig.from_pretrained`]を使用してそれをロードします:
```py
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
```
<Tip>
カスタム構成ファイルを辞書として保存することも、カスタム構成属性とデフォルトの構成属性の違いだけを保存することもできます!詳細については[configuration](main_classes/configuration)のドキュメンテーションをご覧ください。
</Tip>
## Model
次のステップは、[モデル](main_classes/models)を作成することです。モデル(アーキテクチャとも緩く言われることがあります)は、各レイヤーが何をしているか、どの操作が行われているかを定義します。構成からの `num_hidden_layers` のような属性はアーキテクチャを定義するために使用されます。
すべてのモデルは [`PreTrainedModel`] をベースクラスとし、入力埋め込みのリサイズやセルフアテンションヘッドのプルーニングなど、共通のメソッドがいくつかあります。
さらに、すべてのモデルは [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)、または [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) のいずれかのサブクラスでもあります。つまり、モデルはそれぞれのフレームワークの使用法と互換性があります。
モデルにカスタム構成属性をロードします:
```py
>>> from transformers import DistilBertModel
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
>>> model = DistilBertModel(my_config)
```
これにより、事前トレーニング済みの重みではなくランダムな値を持つモデルが作成されます。
これは、トレーニングが行われるまで、まだ有用なものとして使用することはできません。
トレーニングはコストと時間がかかるプロセスです。
通常、トレーニングに必要なリソースの一部しか使用せず、より速くより良い結果を得るために事前学習済みモデルを使用することが良いでしょう。
[`~PreTrainedModel.from_pretrained`]を使用して事前学習済みモデルを作成します:
```py
>>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased")
```
事前学習済みの重みをロードする際、モデルが🤗 Transformersによって提供されている場合、デフォルトのモデル設定が自動的にロードされます。ただし、必要に応じてデフォルトのモデル設定属性の一部またはすべてを独自のもので置き換えることができます。
```py
>>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config)
```
### Model heads
この時点で、ベースのDistilBERTモデルがあり、これは隠れた状態を出力します。隠れた状態はモデルのヘッドへの入力として渡され、最終的な出力を生成します。🤗 Transformersは、モデルがそのタスクをサポートしている限り、各タスクに対応する異なるモデルヘッドを提供します(つまり、DistilBERTを翻訳のようなシーケンス対シーケンスタスクに使用することはできません)。
たとえば、[`DistilBertForSequenceClassification`]は、シーケンス分類ヘッドを持つベースのDistilBERTモデルです。シーケンス分類ヘッドは、プールされた出力の上にある線形層です。
```py
>>> from transformers import DistilBertForSequenceClassification
>>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
新しいタスクにこのチェックポイントを簡単に再利用するには、異なるモデルヘッドに切り替えます。
質問応答タスクの場合、[`DistilBertForQuestionAnswering`] モデルヘッドを使用します。
質問応答ヘッドはシーケンス分類ヘッドと類似していますが、隠れ状態の出力の上に線形層があります。
```py
>>> from transformers import DistilBertForQuestionAnswering
>>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased")
```
## Tokenizer
テキストデータをモデルで使用する前に必要な最後のベースクラスは、生のテキストをテンソルに変換するための[トークナイザ](main_classes/tokenizer)です。
🤗 Transformersで使用できる2つのタイプのトークナイザがあります:
- [`PreTrainedTokenizer`]: トークナイザのPython実装です。
- [`PreTrainedTokenizerFast`]: Rustベースの[🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/)ライブラリからのトークナイザです。
このトークナイザのタイプは、そのRust実装により、特にバッチトークナイゼーション中に高速です。
高速なトークナイザは、トークンを元の単語または文字にマッピングする*オフセットマッピング*などの追加メソッドも提供します。
両方のトークナイザは、エンコードとデコード、新しいトークンの追加、特別なトークンの管理など、共通のメソッドをサポートしています。
<Tip warning={true}>
すべてのモデルが高速なトークナイザをサポートしているわけではありません。
モデルが高速なトークナイザをサポートしているかどうかを確認するには、この[表](index#supported-frameworks)をご覧ください。
</Tip>
独自のトークナイザをトレーニングした場合、*ボキャブラリー*ファイルからトークナイザを作成できます。
```py
>>> from transformers import DistilBertTokenizer
>>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")
```
カスタムトークナイザーから生成される語彙は、事前学習済みモデルのトークナイザーが生成する語彙とは異なることを覚えておくことは重要です。
事前学習済みモデルを使用する場合は、事前学習済みモデルの語彙を使用する必要があります。そうしないと、入力が意味をなさなくなります。
[`DistilBertTokenizer`]クラスを使用して、事前学習済みモデルの語彙を持つトークナイザーを作成します:
```py
>>> from transformers import DistilBertTokenizer
>>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```
[`DistilBertTokenizerFast`]クラスを使用して高速なトークナイザを作成します:
```py
>>> from transformers import DistilBertTokenizerFast
>>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")
```
<Tip>
デフォルトでは、[`AutoTokenizer`]は高速なトークナイザを読み込もうとします。`from_pretrained`内で`use_fast=False`を設定することで、この動作を無効にすることができます。
</Tip>
## Image Processor
画像プロセッサはビジョン入力を処理します。これは基本クラス [`~image_processing_utils.ImageProcessingMixin`] を継承しています。
使用するには、使用しているモデルに関連付けられた画像プロセッサを作成します。
たとえば、画像分類に[ViT](model_doc/vit)を使用する場合、デフォルトの [`ViTImageProcessor`] を作成します。
```py
>>> from transformers import ViTImageProcessor
>>> vit_extractor = ViTImageProcessor()
>>> print(vit_extractor)
ViTImageProcessor {
"do_normalize": true,
"do_resize": true,
"image_processor_type": "ViTImageProcessor",
"image_mean": [
0.5,
0.5,
0.5
],
"image_std": [
0.5,
0.5,
0.5
],
"resample": 2,
"size": 224
}
```
<Tip>
カスタマイズを必要としない場合、モデルのデフォルトの画像プロセッサパラメータをロードするには、単純に`from_pretrained`メソッドを使用してください。
</Tip>
[`ViTImageProcessor`]のパラメータを変更して、カスタムの画像プロセッサを作成できます:
```py
>>> from transformers import ViTImageProcessor
>>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3])
>>> print(my_vit_extractor)
ViTImageProcessor {
"do_normalize": false,
"do_resize": true,
"image_processor_type": "ViTImageProcessor",
"image_mean": [
0.3,
0.3,
0.3
],
"image_std": [
0.5,
0.5,
0.5
],
"resample": "PIL.Image.BOX",
"size": 224
}
```
## Feature Extractor
フィーチャー抽出器は音声入力を処理します。これは基本的な [`~feature_extraction_utils.FeatureExtractionMixin`] クラスから継承され、音声入力を処理するための [`SequenceFeatureExtractor`] クラスからも継承されることがあります。
使用するには、モデルに関連付けられたフィーチャー抽出器を作成します。たとえば、音声分類に [Wav2Vec2](model_doc/wav2vec2) を使用する場合、デフォルトの [`Wav2Vec2FeatureExtractor`] を作成します。
```py
>>> from transformers import Wav2Vec2FeatureExtractor
>>> w2v2_extractor = Wav2Vec2FeatureExtractor()
>>> print(w2v2_extractor)
Wav2Vec2FeatureExtractor {
"do_normalize": true,
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
"feature_size": 1,
"padding_side": "right",
"padding_value": 0.0,
"return_attention_mask": false,
"sampling_rate": 16000
}
```
<Tip>
カスタマイズを行わない場合、モデルのデフォルトの特徴抽出器パラメーターをロードするには、単に `from_pretrained` メソッドを使用してください。
</Tip>
[`Wav2Vec2FeatureExtractor`] のパラメーターを変更して、カスタム特徴抽出器を作成できます:
```py
>>> from transformers import Wav2Vec2FeatureExtractor
>>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False)
>>> print(w2v2_extractor)
Wav2Vec2FeatureExtractor {
"do_normalize": false,
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
"feature_size": 1,
"padding_side": "right",
"padding_value": 0.0,
"return_attention_mask": false,
"sampling_rate": 8000
}
```
## Processor
マルチモーダルタスクをサポートするモデルに対して、🤗 Transformersは便利なプロセッサクラスを提供しています。
このプロセッサクラスは、特徴量抽出器やトークナイザなどの処理クラスを便利にラップし、単一のオブジェクトに結合します。
たとえば、自動音声認識タスク(ASR)用に[`Wav2Vec2Processor`]を使用してみましょう。
ASRは音声をテキストに転写するタスクであり、音声入力を処理するために特徴量抽出器とトークナイザが必要です。
音声入力を処理する特徴量抽出器を作成します:
```py
>>> from transformers import Wav2Vec2FeatureExtractor
>>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True)
```
テキスト入力を処理するトークナイザを作成します:
```py
>>> from transformers import Wav2Vec2CTCTokenizer
>>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")
```
[`Wav2Vec2Processor`]で特徴量抽出器とトークナイザを組み合わせます:
```py
>>> from transformers import Wav2Vec2Processor
>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
```
二つの基本クラス - 設定とモデル - および追加の前処理クラス(トークナイザ、画像プロセッサ、特徴抽出器、またはプロセッサ)を使用することで、🤗 Transformers がサポートするモデルのいずれかを作成できます。これらの基本クラスは設定可能で、必要な特性を使用できます。モデルをトレーニング用に簡単にセットアップしたり、既存の事前学習済みモデルを微調整することができます。 | unknown | github | https://github.com/huggingface/transformers | docs/source/ja/create_a_model.md |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
# Short alias so the tests can write builder(...) instead of the full path.
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
  """Tests for tf.profiler ProfileContext auto-profiling behavior."""

  def testBasics(self):
    """Auto-profiling dumps at the requested steps and round-trips via file."""
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), "dump")
    opts = builder(builder.time_and_memory()
                   ).with_file_output(outfile).build()
    x = lib.BuildFullModel()
    profile_str = None
    profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
    with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
      pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        total_steps = 101
        for i in range(total_steps):
          sess.run(x)
          # Steps are 0-based, so the dumps for steps 15/50/100 appear
          # right after iterations 14, 49 and 99.
          if i == 14 or i == 49:
            self.assertTrue(gfile.Exists(outfile))
            gfile.Remove(outfile)
          if i == 99:
            self.assertTrue(gfile.Exists(profile_step100))
            with gfile.Open(outfile, "r") as f:
              profile_str = f.read()
            gfile.Remove(outfile)
    # Re-profiling from the serialized step-100 profile must reproduce the
    # same dump as the live run.
    with lib.ProfilerFromFile(
        os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
      profiler.profile_operations(options=opts)
      with gfile.Open(outfile, "r") as f:
        self.assertEqual(profile_str, f.read())

  def testAutoTracingInDebugMode(self):
    # Renamed from testAutoTracingInDeubMode (typo); still discovered by the
    # unittest "test" name prefix.
    """debug=True traces only once the warm-up phase is over, and only once."""
    ops.reset_default_graph()
    x = lib.BuildFullModel()
    with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        for _ in range(10):
          sess.run(x)
        for f in gfile.ListDirectory(test.get_temp_dir()):
          # Warm up, no tracing.
          self.assertFalse("run_meta" in f)
        sess.run(x)
        self.assertTrue(
            gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
        gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
        # fetched already.
        sess.run(x)
        for f in gfile.ListDirectory(test.get_temp_dir()):
          self.assertFalse("run_meta" in f)

  def testDisabled(self):
    """enabled=False leaves no profiler installed; the default installs one."""
    ops.reset_default_graph()
    x = lib.BuildFullModel()
    with profile_context.ProfileContext(test.get_temp_dir(),
                                        enabled=False) as pctx:
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        for _ in range(10):
          sess.run(x)
      self.assertTrue(pctx.profiler is None)
      self.assertTrue(
          getattr(session.BaseSession, "profile_context", None) is None)
    with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        for _ in range(10):
          sess.run(x)
      self.assertFalse(pctx.profiler is None)
      self.assertFalse(
          getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
package daemon
import (
"github.com/moby/moby/api/types/container"
libcontainerdtypes "github.com/moby/moby/v2/daemon/internal/libcontainerd/types"
)
// toContainerdResources converts API-level resource limits into their
// containerd representation. On Windows, live resource updates are not
// supported, so the conversion always yields nil and the caller's update
// becomes a no-op.
func toContainerdResources(resources container.Resources) *libcontainerdtypes.Resources {
	// We don't support update, so do nothing
	return nil
}
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
    # Horizon tab rendering a single subnet's details.
    name = _("Overview")
    slug = "overview"
    template_name = "project/networks/subnets/_detail_overview.html"

    def get_context_data(self, request):
        """Fetch the subnet identified by the tab group's ``subnet_id`` kwarg.

        Returns a template context ``{'subnet': ...}``. On any retrieval
        failure, ``exceptions.handle`` is invoked with a redirect back to the
        networks index — presumably it aborts the request by raising, so the
        final return is never reached in that case (TODO confirm).
        """
        subnet_id = self.tab_group.kwargs['subnet_id']
        try:
            subnet = api.neutron.subnet_get(self.request, subnet_id)
        except Exception:
            redirect = reverse('horizon:project:networks:index')
            msg = _('Unable to retrieve subnet details.')
            exceptions.handle(request, msg, redirect=redirect)
        return {'subnet': subnet}
class SubnetDetailTabs(tabs.TabGroup):
    # Tab group for the subnet detail page; currently only the overview tab.
    slug = "subnet_details"
    tabs = (OverviewTab,)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-31 09:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11).

    Sets ``on_delete=PROTECT`` on the ``creation`` app's foreign keys and
    adjusts a few plain fields (``BrochurePage.page`` upload path,
    ``FossCategory.show_on_homepage`` default/help text, ``Language.code``).
    Generated file — create a follow-up migration rather than hand-editing
    these operations.
    """

    dependencies = [
        ('creation', '0012_tutorialresource_publish_at'),
    ]

    operations = [
        migrations.AlterField(
            model_name='adminreviewernotification',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='adminreviewernotification',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='adminreviewlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='adminreviewlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='archivedvideo',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='archivedvideo',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='brochuredocument',
            name='foss_course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='brochuredocument',
            name='foss_language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='brochurepage',
            name='brochure',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='pages', to='creation.BrochureDocument'),
        ),
        migrations.AlterField(
            model_name='brochurepage',
            name='page',
            field=models.FileField(upload_to='brochures/'),
        ),
        migrations.AlterField(
            model_name='collaborate',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='collaborate',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='contributorlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='contributorlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='contributornotification',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='contributornotification',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='contributorrole',
            name='foss_category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='contributorrole',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='contributorrole',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='domainreviewernotification',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='domainreviewernotification',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='domainreviewerrole',
            name='foss_category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='domainreviewerrole',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='domainreviewerrole',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='domainreviewlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='domainreviewlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='fossavailablefortest',
            name='foss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='fossavailablefortest',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='fossavailableforworkshop',
            name='foss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='fossavailableforworkshop',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='fosscategory',
            name='show_on_homepage',
            field=models.BooleanField(default=True, help_text='If unchecked, this foss will be displayed on series page, instead of home page'),
        ),
        migrations.AlterField(
            model_name='fosscategory',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='language',
            name='code',
            field=models.CharField(default='en', max_length=10),
        ),
        migrations.AlterField(
            model_name='language',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='needimprovementlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='needimprovementlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='playlistinfo',
            name='foss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='playlistinfo',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='playlistitem',
            name='playlist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.PlaylistInfo'),
        ),
        migrations.AlterField(
            model_name='publicreviewlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='publicreviewlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='publishtutoriallog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='publishtutoriallog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='qualityreviewernotification',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='qualityreviewernotification',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='qualityreviewerrole',
            name='foss_category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='qualityreviewerrole',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='qualityreviewerrole',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='qualityreviewlog',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='qualityreviewlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='rolerequest',
            name='approved_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='approved_user', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='rolerequest',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='suggestexample',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='suggesttopic',
            name='difficulty_level',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Level'),
        ),
        migrations.AlterField(
            model_name='suggesttopic',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='additional_material_user',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='additional_material', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='assignment_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='assignments', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='code_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='codes', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='keyword_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='keywords', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='prerequisite',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prerequisite', to='creation.TutorialDetail'),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='prerequisite_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='prerequisite', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='slide_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='slides', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialcommoncontent',
            name='tutorial_detail',
            field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='tutorial_detail', to='creation.TutorialDetail'),
        ),
        migrations.AlterField(
            model_name='tutorialdetail',
            name='foss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
        ),
        migrations.AlterField(
            model_name='tutorialdetail',
            name='level',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Level'),
        ),
        migrations.AlterField(
            model_name='tutorialdetail',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialmissingcomponent',
            name='tutorial_resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialResource'),
        ),
        migrations.AlterField(
            model_name='tutorialmissingcomponent',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='raised_user', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialmissingcomponentreply',
            name='missing_component',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialMissingComponent'),
        ),
        migrations.AlterField(
            model_name='tutorialmissingcomponentreply',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='common_content',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialCommonContent'),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.Language'),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='outline_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='outlines', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='script_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='scripts', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='tutorial_detail',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.TutorialDetail'),
        ),
        migrations.AlterField(
            model_name='tutorialresource',
            name='video_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='videos', to=settings.AUTH_USER_MODEL),
        ),
    ]
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
**Description**
Returns all unique values from the combined input fields (set union). Null values are treated as empty sets; returns `null` only if both fields are null. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/_snippets/functions/description/mv_union.md |
"""Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Validate that ``y_true`` and ``y_pred`` describe one classification task.

    Binary/multiclass targets are squeezed to 1d arrays; multilabel
    targets are converted to CSR sparse indicator matrices. A mix of
    multilabel and multiclass targets, continuous or multioutput
    targets, or targets of different lengths raise ``ValueError``.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The common target type, as determined by
        ``utils.multiclass.type_of_target``.

    y_true : array or indicator matrix

    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)

    types = set([type_true, type_pred])
    # Binary is a special case of multiclass: unify the pair.
    if types == set(["binary", "multiclass"]):
        types = set(["multiclass"])

    if len(types) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))

    # Exactly one type remains; extract it.
    y_type = types.pop()

    # No metric supports the "multiclass-multioutput" format.
    if y_type not in ("binary", "multiclass", "multilabel-indicator"):
        raise ValueError("{0} is not supported".format(y_type))

    if y_type in ("binary", "multiclass"):
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)

    if y_type.startswith('multilabel'):
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'

    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    For multilabel input this computes subset accuracy: a sample counts
    as correct only when its full predicted label set matches the true
    label set exactly.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        When ``True``, return the fraction of correctly classified
        samples; when ``False``, return their count.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The fraction (``normalize == True``) or number
        (``normalize == False``) of correctly classified samples. The
        best value is 1, respectively the number of samples.

    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss

    Notes
    -----
    For binary and multiclass targets this function coincides with the
    ``jaccard_similarity_score`` function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import accuracy_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> accuracy_score(y_true, y_pred)
    0.5
    >>> accuracy_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if not y_type.startswith('multilabel'):
        score = y_true == y_pred
    else:
        # A multilabel sample is correct only if no label differs.
        differing_labels = count_nonzero(y_true - y_pred, axis=1)
        score = differing_labels == 0

    return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
        if np.all([l not in y_true for l in labels]):
            raise ValueError("At least one label specified must be in y_true")

    if sample_weight is None:
        # `np.int` was a deprecated alias removed in NumPy 1.24;
        # use an explicit fixed-width integer dtype for the counts.
        sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = np.asarray(sample_weight)

    check_consistent_length(sample_weight, y_true, y_pred)

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # Convert y_true / y_pred into label indices; anything outside
    # `labels` receives an out-of-range index and is dropped below.
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    # also eliminate weights of eliminated items
    sample_weight = sample_weight[ind]

    # coo_matrix sums duplicate (row, col) entries, which accumulates the
    # per-sample weights into cell counts.
    CM = coo_matrix((sample_weight, (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()

    return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    This function computes Cohen's kappa [1]_, a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2]_.

    Read more in the :ref:`User Guide <cohen_kappa>`.

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.

    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.

    weights : str, optional
        List of weighting type to calculate the score. None means no weighted;
        "linear" means linear weighted; "quadratic" means quadratic weighted.

    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
           <http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
    .. [3] `Wikipedia entry for the Cohen's kappa.
            <https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
    """
    # NOTE: the docstring is now a raw string so that "\kappa" is not
    # interpreted as an (invalid) escape sequence.
    confusion = confusion_matrix(y1, y2, labels=labels)
    n_classes = confusion.shape[0]
    sum0 = np.sum(confusion, axis=0)
    sum1 = np.sum(confusion, axis=1)
    # Outer product of the marginals gives the chance-agreement counts.
    expected = np.outer(sum0, sum1) / np.sum(sum0)

    if weights is None:
        # Disagreement weight 1 everywhere except the diagonal.
        # `np.int` was removed from NumPy; use an explicit dtype.
        w_mat = np.ones([n_classes, n_classes], dtype=np.int64)
        w_mat.flat[:: n_classes + 1] = 0
    elif weights == "linear" or weights == "quadratic":
        w_mat = np.zeros([n_classes, n_classes], dtype=np.int64)
        w_mat += np.arange(n_classes)
        if weights == "linear":
            w_mat = np.abs(w_mat - w_mat.T)
        else:
            w_mat = (w_mat - w_mat.T) ** 2
    else:
        raise ValueError("Unknown kappa weighting type.")

    k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
    return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score

    The Jaccard index [1], or Jaccard similarity coefficient, compares
    each sample's predicted label set to the true one as the size of
    their intersection divided by the size of their union.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        When ``True``, return the average per-sample Jaccard similarity
        coefficient; when ``False``, return its sum over the sample set.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The average (``normalize == True``) or summed
        (``normalize == False``) Jaccard similarity coefficient. The
        best value is 1, respectively the number of samples.

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    Notes
    -----
    For binary and multiclass targets this function is equivalent to
    ``accuracy_score``; it differs only for multilabel targets.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <https://en.wikipedia.org/wiki/Jaccard_index>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_similarity_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> jaccard_similarity_score(y_true, y_pred)
    0.5
    >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
        np.ones((2, 2)))
    0.75
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if not y_type.startswith('multilabel'):
        score = y_true == y_pred
    else:
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error here
            union = count_nonzero(y_true + y_pred, axis=1)
            intersection = count_nonzero(y_true.multiply(y_pred), axis=1)
            score = intersection / union
            # An empty union means both label sets are empty; define the
            # Jaccard index as 1 there: lim_{x->0} x/x = 1.
            # Note with py2.6 and np 1.3: we can't check safely for nan.
            score[union == 0.0] = 1.0

    return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
    """Compute the Matthews correlation coefficient (MCC) for binary classes

    The Matthews correlation coefficient is a balanced quality measure
    for binary (two-class) classifications that accounts for true and
    false positives and negatives, even with very unequal class sizes.
    It is essentially a correlation coefficient between -1 and +1: +1 is
    a perfect prediction, 0 an average random prediction and -1 an
    inverse prediction. The statistic is also known as the phi
    coefficient. [source: Wikipedia]

    Only in the binary case does this relate to information about true and
    false positives and negatives. See references below.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape = [n_samples], default None
        Sample weights.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 and inverse
        prediction).

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
       accuracy of prediction algorithms for classification: an overview
       <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_

    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
       <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_

    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)  # doctest: +ELLIPSIS
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)

    # Encode both label vectors onto {0, 1} with a shared encoder.
    lb = LabelEncoder()
    lb.fit(np.hstack([y_true, y_pred]))
    y_true = lb.transform(y_true)
    y_pred = lb.transform(y_pred)

    # MCC is the (weighted) Pearson correlation of the two 0/1 vectors.
    yt_centered = y_true - np.average(y_true, weights=sample_weight)
    yp_centered = y_pred - np.average(y_pred, weights=sample_weight)

    cov_ytyp = np.average(yt_centered * yp_centered, weights=sample_weight)
    var_yt = np.average(yt_centered ** 2, weights=sample_weight)
    var_yp = np.average(yp_centered ** 2, weights=sample_weight)

    mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
    # A constant vector has zero variance -> 0/0 -> nan; report 0 then.
    return 0. if np.isnan(mcc) else mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    Returns the fraction of misclassifications (float) when
    ``normalize`` is ``True``, otherwise their number (int). The best
    performance is 0.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        When ``False``, return the number of misclassifications;
        otherwise return their fraction.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        Fraction (``normalize == True``) or number
        (``normalize == False``) of misclassified samples.

    Notes
    -----
    For multilabel targets this is the subset zero-one loss: a sample
    incurs a loss of one unless its entire label set is predicted
    correctly.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score

    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1

    In the multilabel case with binary label indicators:

    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    score = accuracy_score(y_true, y_pred,
                           normalize=normalize,
                           sample_weight=sample_weight)

    if normalize:
        return 1 - score

    # Un-normalized: the loss is the (weighted) count of samples minus
    # the (weighted) count of correct ones.
    if sample_weight is None:
        n_samples = _num_samples(y_true)
    else:
        n_samples = np.sum(sample_weight)
    return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure

    The F1 score is the harmonic mean of precision and recall, with both
    contributing equally; its best value is 1 and its worst value is 0::

        F1 = 2 * (precision * recall) / (precision + recall)

    For multiclass and multilabel targets this is the weighted average
    of the per-class F1 scores.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> f1_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average=None)
    array([ 0.8,  0. ,  0. ])
    """
    # F1 is simply F-beta with beta fixed to 1.
    return fbeta_score(y_true, y_pred, beta=1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score

    The F-beta score is the weighted harmonic mean of precision and
    recall, with 1 as its optimal value and 0 as its worst. ``beta``
    controls the balance: ``beta < 1`` emphasizes precision, ``beta > 1``
    emphasizes recall (``beta -> 0`` considers only precision,
    ``beta -> inf`` only recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta: float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or weighted
        average of the F-beta score of each class for the multiclass task.

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.

    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.33...
    >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
    ... # doctest: +ELLIPSIS
    array([ 0.71...,  0.        ,  0.        ])
    """
    # Delegate to the shared P/R/F computation; only the F-score slot of
    # the returned tuple is needed, and only F-score warnings are wanted.
    _, _, fscore, _ = precision_recall_fscore_support(
        y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label,
        average=average, warn_for=('f-score',),
        sample_weight=sample_weight)
    return fscore
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall: float (if average is not None) or array of float, , shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives.  It measures
    the classifier's ability not to label a negative sample as positive.
    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded, while labels not present in the data contribute 0 to a
        macro average.  For multilabel targets, labels are column indices.
        By default, all labels in ``y_true`` and ``y_pred`` are used in
        sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        For multiclass or multilabel data this is ignored; use
        ``labels=[pos_label]`` with ``average != 'binary'`` to report scores
        for one label only.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets; determines the type of
        averaging performed (``None`` returns per-class scores).  See
        :func:`precision_recall_fscore_support` for the meaning of each
        option.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or weighted
        average of the precision of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> precision_score(y_true, y_pred, average='weighted')
    ...  # doctest: +ELLIPSIS
    0.22...
    """
    # Delegate to the shared P/R/F computation, warning only for precision
    # (the recall/F components are discarded here).
    precision, _, _, _ = precision_recall_fscore_support(
        y_true, y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=('precision',),
        sample_weight=sample_weight)
    return precision
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives.  It measures the
    classifier's ability to find all the positive samples.
    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  Labels present in the data can be
        excluded, while labels not present in the data contribute 0 to a
        macro average.  For multilabel targets, labels are column indices.
        By default, all labels in ``y_true`` and ``y_pred`` are used in
        sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        For multiclass or multilabel data this is ignored; use
        ``labels=[pos_label]`` with ``average != 'binary'`` to report scores
        for one label only.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        Required for multiclass/multilabel targets; determines the type of
        averaging performed (``None`` returns per-class scores).  See
        :func:`precision_recall_fscore_support` for the meaning of each
        option.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.33...
    """
    # Delegate to the shared P/R/F computation, warning only for recall
    # (the precision/F components are discarded here).
    recall = precision_recall_fscore_support(
        y_true, y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=('recall',),
        sample_weight=sample_weight)[1]
    return recall
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.
    target_names : list of strings
        Optional display names matching the labels (same order).  Must be at
        least as long as ``labels``, otherwise an IndexError is raised while
        building the per-class rows.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    digits : int
        Number of digits for formatting output floating point values

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class,
        followed by a support-weighted ``avg / total`` line.

    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.50      1.00      0.67         1
        class 1       0.00      0.00      0.00         1
        class 2       1.00      0.67      0.80         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.61         5
    <BLANKLINE>
    """
    # Report every label seen in the data unless an explicit subset/order
    # was requested.
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    last_line_heading = 'avg / total'

    # Fall back to the labels' own string form when no display names given.
    if target_names is None:
        target_names = ['%s' % l for l in labels]
    # First column must be wide enough for the longest class name and the
    # trailing 'avg / total' heading.
    name_width = max(len(cn) for cn in target_names)
    width = max(name_width, len(last_line_heading), digits)

    headers = ["precision", "recall", "f1-score", "support"]
    # Build a %-style row template: one name column of `width` chars
    # followed by one 9-char column per metric.
    fmt = '%% %ds' % width  # first column: class name
    fmt += '  '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'

    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'

    # Per-class (unaveraged) metrics; averaging is done manually below so
    # the same p/r/f1/s arrays feed both the rows and the summary line.
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)

    # One formatted row per class.
    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["{0:0.{1}f}".format(v, digits)]
        values += ["{0}".format(s[i])]
        report += fmt % tuple(values)

    report += '\n'

    # compute averages
    # Support-weighted averages, i.e. the 'weighted' averaging strategy.
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["{0:0.{1}f}".format(v, digits)]
    values += ['{0}'.format(np.sum(s))]
    report += fmt % tuple(values)
    return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
                 classes=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    labels : array, shape = [n_labels], optional (default=None)
        Integer array of labels. If not provided, labels will be inferred
        from y_true and y_pred.

        .. versionadded:: 0.18
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    classes : array, shape = [n_labels], optional
        (deprecated) Integer array of labels. This parameter has been
        renamed to ``labels`` in version 0.18 and will be removed in 0.20.

    Returns
    -------
    loss : float or int,
        Return the average Hamming loss between element of ``y_true`` and
        ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss

    Notes
    -----
    For multiclass targets the Hamming loss equals the subset
    ``zero_one_loss``.  For multilabel targets it is more forgiving:
    individual wrong labels are penalized instead of the whole label set,
    so it is upper-bounded by the subset zero-one loss and, when normalized
    over samples, always lies between 0 and 1.

    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
           An Overview. International Journal of Data Warehousing & Mining,
           3(3), 1-13, July-September 2007.
    .. [2] `Wikipedia entry on the Hamming distance
           <https://en.wikipedia.org/wiki/Hamming_distance>`_

    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25

    In the multilabel case with binary label indicators:

    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
    0.75
    """
    # Honor the deprecated 'classes' alias, but steer callers to 'labels'.
    if classes is not None:
        warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
                      "will be removed in 0.20.", DeprecationWarning)
        labels = classes

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    labels = (unique_labels(y_true, y_pred) if labels is None
              else np.asarray(labels))

    # Mean weight normalizes the weighted difference count below.
    weight_average = 1. if sample_weight is None else np.mean(sample_weight)

    if y_type.startswith('multilabel'):
        # Count (weighted) cell-wise disagreements and normalize by the
        # total number of cells times the average weight.
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        return (n_differences /
                (y_true.shape[0] * len(labels) * weight_average))

    if y_type in ["binary", "multiclass"]:
        # Single-label case: fraction of misclassified samples.
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)

    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
             labels=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    This is the negative log-likelihood of the true labels given a
    probabilistic classifier's predictions, as used in (multinomial)
    logistic regression and neural networks.  For a single sample with true
    label yt in {0,1} and estimated probability yp that yt = 1,

        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))

    Read more in the :ref:`User Guide <log_loss>`.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.
    y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
        Predicted probabilities, as returned by a classifier's
        predict_proba method. If ``y_pred.shape = (n_samples,)``
        the probabilities provided are assumed to be that of the
        positive class. The labels in ``y_pred`` are assumed to be
        ordered alphabetically, as done by
        :class:`preprocessing.LabelBinarizer`.
    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    labels : array-like, optional (default=None)
        If not provided, labels will be inferred from y_true. If ``labels``
        is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
        assumed to be binary and are inferred from ``y_true``.

        .. versionadded:: 0.18

    Returns
    -------
    loss : float

    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...

    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_pred = check_array(y_pred, ensure_2d=False)
    check_consistent_length(y_pred, y_true)

    # Fit the binarizer on explicit labels when given, else on y_true.
    lb = LabelBinarizer()
    lb.fit(y_true if labels is None else labels)

    # Log loss needs at least two classes to be defined.
    if len(lb.classes_) == 1:
        if labels is None:
            raise ValueError('y_true contains only one label ({0}). Please '
                             'provide the true labels explicitly through the '
                             'labels argument.'.format(lb.classes_[0]))
        else:
            raise ValueError('The labels array needs to contain at least two '
                             'labels for log_loss, '
                             'got {0}.'.format(lb.classes_))

    y_true_bin = lb.transform(y_true)
    # Binary targets binarize to a single column; expand to two columns
    # (negative class first) so the dot product below works uniformly.
    if y_true_bin.shape[1] == 1:
        y_true_bin = np.hstack([1 - y_true_bin, y_true_bin])

    # Clipping keeps log() finite at p == 0 or p == 1.
    y_pred = np.clip(y_pred, eps, 1 - eps)

    # A 1-D y_pred is the positive-class probability; expand it to the
    # two-column (neg, pos) layout.
    if y_pred.ndim == 1:
        y_pred = y_pred[:, np.newaxis]
    if y_pred.shape[1] == 1:
        y_pred = np.hstack([1 - y_pred, y_pred])

    # Check if dimensions are consistent.
    y_true_bin = check_array(y_true_bin)
    if len(lb.classes_) != y_pred.shape[1]:
        if labels is None:
            raise ValueError("y_true and y_pred contain different number of "
                             "classes {0}, {1}. Please provide the true "
                             "labels explicitly through the labels argument. "
                             "Classes found in "
                             "y_true: {2}".format(y_true_bin.shape[1],
                                                  y_pred.shape[1],
                                                  lb.classes_))
        else:
            raise ValueError('The number of classes in labels is different '
                             'from that in y_pred. Classes found in '
                             'labels: {0}'.format(lb.classes_))

    # Renormalize rows so clipped probabilities still sum to 1, then take
    # the per-sample cross-entropy.
    y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
    loss = -(y_true_bin * np.log(y_pred)).sum(axis=1)

    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized)

    In the binary case, assuming labels in y_true are encoded with +1 and
    -1, a prediction mistake gives ``margin = y_true * pred_decision < 0``,
    so ``1 - margin > 1``; the cumulated hinge loss is therefore an upper
    bound on the number of mistakes made by the classifier.

    In the multiclass case, either all labels must appear in y_true or the
    full label set must be passed via ``labels``.  The multiclass margin is
    computed according to Crammer-Singer's method, and the cumulated loss is
    again an upper bound on the number of mistakes.

    Read more in the :ref:`User Guide <hinge_loss>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive label
        must be greater than the negative label.
    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).
    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge loss.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <https://en.wikipedia.org/wiki/Hinge_loss>`_
    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292
    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> _ = est.fit(X, y)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> hinge_loss([-1, 1, 1], pred_decision)  # doctest: +ELLIPSIS
    0.30...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    observed_labels = np.unique(y_true)

    if observed_labels.size > 2:
        # Multiclass: Crammer-Singer margin.
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(observed_labels) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        encoder = LabelEncoder()
        encoder.fit(observed_labels if labels is None else labels)
        y_true = encoder.transform(y_true)
        n_samples = y_true.shape[0]

        # Boolean mask selecting each sample's true-class decision value.
        true_mask = np.zeros_like(pred_decision, dtype=bool)
        true_mask[np.arange(n_samples), y_true] = True

        # Margin = true-class score minus the best rival score.
        margin = pred_decision[true_mask]
        rivals = pred_decision[~true_mask].reshape(n_samples, -1)
        margin = margin - np.max(rivals, axis=1)
    else:
        # Binary: assumes positive/negative labels encode as +1/-1.
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)

        binarizer = LabelBinarizer(neg_label=-1)
        y_true = binarizer.fit_transform(y_true)[:, 0]

        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")

    losses = 1 - margin
    # The hinge loss does not penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities.

    Returns ``y_true`` binarized against its own label set (first column).
    """
    check_consistent_length(y_true, y_prob)

    classes = np.unique(y_true)
    # More than two distinct labels is not a binary problem.
    if len(classes) > 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % classes)

    # Probabilities must lie in [0, 1].
    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")

    return label_binarize(y_true, classes)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The Brier score is the mean squared difference between the predicted
    probability of the positive outcome and the actual outcome; the smaller
    it is, the better calibrated the predictions are (hence the naming with
    "loss").  It always lies between zero and one, since that is the
    largest possible gap between a probability (in [0, 1]) and an outcome
    (0 or 1).

    The score is appropriate for binary and categorical outcomes that can
    be structured as true or false, but not for ordinal variables with
    three or more values (it assumes all outcomes are equally "distant"
    from one another).  The positive label is selected via ``pos_label``,
    which defaults to 1.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.
    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    pos_label : int (default: None)
        Label of the positive class. If None, the maximum label is used as
        positive class.

    Returns
    -------
    score : float
        Brier score

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
    0.0

    References
    ----------
    .. [1] `Wikipedia entry for the Brier score.
            <https://en.wikipedia.org/wiki/Brier_score>`_
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    assert_all_finite(y_true)
    assert_all_finite(y_prob)

    # Default positive class: the largest label value.
    if pos_label is None:
        pos_label = y_true.max()
    # Map targets to a 0/1 indicator for the positive class.
    y_true = (y_true == pos_label).astype(int)
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)

    squared_diff = np.square(y_true - y_prob)
    return np.average(squared_diff, weights=sample_weight)
/*-------------------------------------------------------------------------
*
* foreigncmds.c
* foreign-data wrapper/server creation/manipulation commands
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/commands/foreigncmds.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "access/reloptions.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/objectaccess.h"
#include "catalog/pg_foreign_data_wrapper.h"
#include "catalog/pg_foreign_server.h"
#include "catalog/pg_foreign_table.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "catalog/pg_user_mapping.h"
#include "commands/defrem.h"
#include "foreign/fdwapi.h"
#include "foreign/foreign.h"
#include "miscadmin.h"
#include "parser/parse_func.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/syscache.h"
typedef struct
{
char *tablename;
char *cmd;
} import_error_callback_arg;
/* Internal functions */
static void import_error_callback(void *arg);
/*
 * Convert a DefElem list to the text array format that is used in
 * pg_foreign_data_wrapper, pg_foreign_server, pg_user_mapping, and
 * pg_foreign_table.  Each list element becomes one "name=value" text
 * array member.
 *
 * Returns the array in the form of a Datum, or PointerGetDatum(NULL)
 * if the list is empty.
 *
 * Note: The array is usually stored to database without further
 * processing, hence any validation should be done before this
 * conversion.
 */
static Datum
optionListToArray(List *options)
{
	ArrayBuildState *accum = NULL;
	ListCell   *lc;

	foreach(lc, options)
	{
		DefElem    *elem = lfirst(lc);
		const char *optname = elem->defname;
		const char *optvalue = defGetString(elem);
		char	   *pairstr;
		text	   *pairtext;

		/* Reject "=" in the name, else "a=b=c" could not be parsed back */
		if (strchr(optname, '=') != NULL)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("invalid option name \"%s\": must not contain \"=\"",
							optname)));

		/* Build the "name=value" pair as a text datum */
		pairstr = psprintf("%s=%s", optname, optvalue);
		pairtext = cstring_to_text(pairstr);
		pfree(pairstr);

		accum = accumArrayResult(accum, PointerGetDatum(pairtext),
								 false, TEXTOID,
								 CurrentMemoryContext);
	}

	if (accum == NULL)
		return PointerGetDatum(NULL);

	return makeArrayResult(accum, CurrentMemoryContext);
}
/*
 * Transform a list of DefElem into text array format.  This is substantially
 * the same thing as optionListToArray(), except we recognize SET/ADD/DROP
 * actions for modifying an existing list of options, which is passed in
 * Datum form as oldOptions.  Also, if fdwvalidator isn't InvalidOid
 * it specifies a validator function to call on the result.
 *
 * Returns the array in the form of a Datum, or PointerGetDatum(NULL)
 * if the list is empty.
 *
 * This is used by CREATE/ALTER of FOREIGN DATA WRAPPER/SERVER/USER MAPPING/
 * FOREIGN TABLE.
 */
Datum
transformGenericOptions(Oid catalogId,
						Datum oldOptions,
						List *options,
						Oid fdwvalidator)
{
	/* Start from the pre-existing options, decoded back to DefElems */
	List	   *resultOptions = untransformRelOptions(oldOptions);
	ListCell   *optcell;
	Datum		result;

	foreach(optcell, options)
	{
		DefElem    *od = lfirst(optcell);
		ListCell   *cell;

		/*
		 * Find the element in resultOptions.  We need this for validation in
		 * all cases.  Note: if the search falls through without a match,
		 * foreach leaves "cell" set to NULL, which the switch below relies on.
		 */
		foreach(cell, resultOptions)
		{
			DefElem    *def = lfirst(cell);

			if (strcmp(def->defname, od->defname) == 0)
				break;
		}

		/*
		 * It is possible to perform multiple SET/DROP actions on the same
		 * option.  The standard permits this, as long as the options to be
		 * added are unique.  Note that an unspecified action is taken to be
		 * ADD.
		 */
		switch (od->defaction)
		{
			case DEFELEM_DROP:
				if (!cell)
					ereport(ERROR,
							(errcode(ERRCODE_UNDEFINED_OBJECT),
							 errmsg("option \"%s\" not found",
									od->defname)));
				resultOptions = list_delete_cell(resultOptions, cell);
				break;

			case DEFELEM_SET:
				if (!cell)
					ereport(ERROR,
							(errcode(ERRCODE_UNDEFINED_OBJECT),
							 errmsg("option \"%s\" not found",
									od->defname)));
				lfirst(cell) = od;
				break;

			case DEFELEM_ADD:
			case DEFELEM_UNSPEC:
				if (cell)
					ereport(ERROR,
							(errcode(ERRCODE_DUPLICATE_OBJECT),
							 errmsg("option \"%s\" provided more than once",
									od->defname)));
				resultOptions = lappend(resultOptions, od);
				break;

			default:
				elog(ERROR, "unrecognized action %d on option \"%s\"",
					 (int) od->defaction, od->defname);
				break;
		}
	}

	result = optionListToArray(resultOptions);

	if (OidIsValid(fdwvalidator))
	{
		Datum		valarg = result;

		/*
		 * Pass a null options list as an empty array, so that validators
		 * don't have to be declared non-strict to handle the case.
		 */
		if (DatumGetPointer(valarg) == NULL)
			valarg = PointerGetDatum(construct_empty_array(TEXTOID));
		OidFunctionCall2(fdwvalidator, valarg, ObjectIdGetDatum(catalogId));
	}

	return result;
}
/*
 * Internal workhorse for changing a data wrapper's owner.
 *
 * Allow this only for superusers; also the new owner must be a
 * superuser.
 *
 * "rel" is pg_foreign_data_wrapper, already opened and suitably locked by
 * the caller; "tup" is a modifiable copy of the wrapper's catalog tuple.
 * The caller remains responsible for freeing the tuple and closing rel.
 */
static void
AlterForeignDataWrapperOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
{
	Form_pg_foreign_data_wrapper form;
	Datum		repl_val[Natts_pg_foreign_data_wrapper];
	bool		repl_null[Natts_pg_foreign_data_wrapper];
	bool		repl_repl[Natts_pg_foreign_data_wrapper];
	Acl		   *newAcl;
	Datum		aclDatum;
	bool		isNull;

	form = (Form_pg_foreign_data_wrapper) GETSTRUCT(tup);

	/* Must be a superuser to change a FDW owner */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to change owner of foreign-data wrapper \"%s\"",
						NameStr(form->fdwname)),
				 errhint("Must be superuser to change owner of a foreign-data wrapper.")));

	/* New owner must also be a superuser */
	if (!superuser_arg(newOwnerId))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to change owner of foreign-data wrapper \"%s\"",
						NameStr(form->fdwname)),
				 errhint("The owner of a foreign-data wrapper must be a superuser.")));

	/* Skip the catalog update when ownership would not actually change */
	if (form->fdwowner != newOwnerId)
	{
		memset(repl_null, false, sizeof(repl_null));
		memset(repl_repl, false, sizeof(repl_repl));

		repl_repl[Anum_pg_foreign_data_wrapper_fdwowner - 1] = true;
		repl_val[Anum_pg_foreign_data_wrapper_fdwowner - 1] = ObjectIdGetDatum(newOwnerId);

		aclDatum = heap_getattr(tup,
								Anum_pg_foreign_data_wrapper_fdwacl,
								RelationGetDescr(rel),
								&isNull);
		/* Null ACLs do not require changes */
		if (!isNull)
		{
			/* Transfer the old owner's grants to the new owner */
			newAcl = aclnewowner(DatumGetAclP(aclDatum),
								 form->fdwowner, newOwnerId);
			repl_repl[Anum_pg_foreign_data_wrapper_fdwacl - 1] = true;
			repl_val[Anum_pg_foreign_data_wrapper_fdwacl - 1] = PointerGetDatum(newAcl);
		}

		tup = heap_modify_tuple(tup, RelationGetDescr(rel), repl_val, repl_null,
								repl_repl);

		CatalogTupleUpdate(rel, &tup->t_self, tup);

		/* Update owner dependency reference */
		changeDependencyOnOwner(ForeignDataWrapperRelationId,
								form->oid,
								newOwnerId);
	}

	InvokeObjectPostAlterHook(ForeignDataWrapperRelationId,
							  form->oid, 0);
}
/*
 * Change foreign-data wrapper owner -- by name
 *
 * The permission checks live in AlterForeignDataWrapperOwner_internal(),
 * above.
 */
ObjectAddress
AlterForeignDataWrapperOwner(const char *name, Oid newOwnerId)
{
	Relation	fdwrel;
	HeapTuple	tuple;
	Form_pg_foreign_data_wrapper fdwform;
	Oid			wrapperOid;
	ObjectAddress result;

	fdwrel = table_open(ForeignDataWrapperRelationId, RowExclusiveLock);

	/* Look the wrapper up by name; complain if it isn't there */
	tuple = SearchSysCacheCopy1(FOREIGNDATAWRAPPERNAME, CStringGetDatum(name));
	if (!HeapTupleIsValid(tuple))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("foreign-data wrapper \"%s\" does not exist", name)));

	fdwform = (Form_pg_foreign_data_wrapper) GETSTRUCT(tuple);
	wrapperOid = fdwform->oid;

	/* Do the ownership change (includes all permission checking) */
	AlterForeignDataWrapperOwner_internal(fdwrel, tuple, newOwnerId);

	ObjectAddressSet(result, ForeignDataWrapperRelationId, wrapperOid);

	heap_freetuple(tuple);
	table_close(fdwrel, RowExclusiveLock);

	return result;
}
/*
 * Change foreign-data wrapper owner -- by OID
 *
 * Note restrictions in the "_internal" function, above.
 *
 * Fix: the parameter was misspelled "fwdId"; renamed to "fdwId" for
 * consistency with the rest of this file.  C callers are unaffected,
 * since parameter names are not part of the call interface.
 */
void
AlterForeignDataWrapperOwner_oid(Oid fdwId, Oid newOwnerId)
{
	HeapTuple	tup;
	Relation	rel;

	rel = table_open(ForeignDataWrapperRelationId, RowExclusiveLock);

	tup = SearchSysCacheCopy1(FOREIGNDATAWRAPPEROID, ObjectIdGetDatum(fdwId));

	if (!HeapTupleIsValid(tup))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("foreign-data wrapper with OID %u does not exist", fdwId)));

	/* All permission checking happens in the _internal function */
	AlterForeignDataWrapperOwner_internal(rel, tup, newOwnerId);

	heap_freetuple(tup);

	table_close(rel, RowExclusiveLock);
}
/*
 * Internal workhorse for changing a foreign server's owner.
 *
 * "rel" is pg_foreign_server, already opened and suitably locked by the
 * caller; "tup" is a modifiable copy of the server's catalog tuple.  The
 * caller remains responsible for freeing the tuple and closing rel.
 *
 * Unlike FDW ownership changes, non-superusers may do this if they own
 * the server, can SET ROLE to the new owner, and the new owner has USAGE
 * on the underlying FDW.
 */
static void
AlterForeignServerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
{
	Form_pg_foreign_server form;
	Datum		repl_val[Natts_pg_foreign_server];
	bool		repl_null[Natts_pg_foreign_server];
	bool		repl_repl[Natts_pg_foreign_server];
	Acl		   *newAcl;
	Datum		aclDatum;
	bool		isNull;

	form = (Form_pg_foreign_server) GETSTRUCT(tup);

	if (form->srvowner != newOwnerId)
	{
		/* Superusers can always do it */
		if (!superuser())
		{
			Oid			srvId;
			AclResult	aclresult;

			srvId = form->oid;

			/* Must be owner */
			if (!object_ownercheck(ForeignServerRelationId, srvId, GetUserId()))
				aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER,
							   NameStr(form->srvname));

			/* Must be able to become new owner */
			check_can_set_role(GetUserId(), newOwnerId);

			/* New owner must have USAGE privilege on foreign-data wrapper */
			aclresult = object_aclcheck(ForeignDataWrapperRelationId, form->srvfdw, newOwnerId, ACL_USAGE);
			if (aclresult != ACLCHECK_OK)
			{
				ForeignDataWrapper *fdw = GetForeignDataWrapper(form->srvfdw);

				aclcheck_error(aclresult, OBJECT_FDW, fdw->fdwname);
			}
		}

		memset(repl_null, false, sizeof(repl_null));
		memset(repl_repl, false, sizeof(repl_repl));

		repl_repl[Anum_pg_foreign_server_srvowner - 1] = true;
		repl_val[Anum_pg_foreign_server_srvowner - 1] = ObjectIdGetDatum(newOwnerId);

		aclDatum = heap_getattr(tup,
								Anum_pg_foreign_server_srvacl,
								RelationGetDescr(rel),
								&isNull);
		/* Null ACLs do not require changes */
		if (!isNull)
		{
			/* Transfer the old owner's grants to the new owner */
			newAcl = aclnewowner(DatumGetAclP(aclDatum),
								 form->srvowner, newOwnerId);
			repl_repl[Anum_pg_foreign_server_srvacl - 1] = true;
			repl_val[Anum_pg_foreign_server_srvacl - 1] = PointerGetDatum(newAcl);
		}

		tup = heap_modify_tuple(tup, RelationGetDescr(rel), repl_val, repl_null,
								repl_repl);

		CatalogTupleUpdate(rel, &tup->t_self, tup);

		/* Update owner dependency reference */
		changeDependencyOnOwner(ForeignServerRelationId, form->oid,
								newOwnerId);
	}

	InvokeObjectPostAlterHook(ForeignServerRelationId,
							  form->oid, 0);
}
/*
 * Change foreign server owner -- by name
 *
 * Permission checking happens in AlterForeignServerOwner_internal().
 */
ObjectAddress
AlterForeignServerOwner(const char *name, Oid newOwnerId)
{
	Relation	srvrel;
	HeapTuple	tuple;
	Form_pg_foreign_server srvform;
	Oid			serverOid;
	ObjectAddress result;

	srvrel = table_open(ForeignServerRelationId, RowExclusiveLock);

	/* Look the server up by name; complain if it isn't there */
	tuple = SearchSysCacheCopy1(FOREIGNSERVERNAME, CStringGetDatum(name));
	if (!HeapTupleIsValid(tuple))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("server \"%s\" does not exist", name)));

	srvform = (Form_pg_foreign_server) GETSTRUCT(tuple);
	serverOid = srvform->oid;

	AlterForeignServerOwner_internal(srvrel, tuple, newOwnerId);

	ObjectAddressSet(result, ForeignServerRelationId, serverOid);

	heap_freetuple(tuple);
	table_close(srvrel, RowExclusiveLock);

	return result;
}
/*
 * Change foreign server owner -- by OID
 *
 * Permission checking happens in AlterForeignServerOwner_internal().
 */
void
AlterForeignServerOwner_oid(Oid srvId, Oid newOwnerId)
{
	Relation	srvrel;
	HeapTuple	tuple;

	srvrel = table_open(ForeignServerRelationId, RowExclusiveLock);

	/* Fetch a modifiable copy of the server's catalog tuple */
	tuple = SearchSysCacheCopy1(FOREIGNSERVEROID, ObjectIdGetDatum(srvId));
	if (!HeapTupleIsValid(tuple))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("foreign server with OID %u does not exist", srvId)));

	AlterForeignServerOwner_internal(srvrel, tuple, newOwnerId);

	heap_freetuple(tuple);
	table_close(srvrel, RowExclusiveLock);
}
/*
 * Convert a handler function name passed from the parser to an Oid.
 *
 * Returns InvalidOid when no handler was specified (or HANDLER NONE).
 * Errors out if the function does not exist or does not return
 * fdw_handler.
 */
static Oid
lookup_fdw_handler_func(DefElem *handler)
{
	Oid			funcOid;

	/* No HANDLER clause, or an explicit NONE: record no handler */
	if (handler == NULL || handler->arg == NULL)
		return InvalidOid;

	/* Resolve the name; handler functions take no arguments */
	funcOid = LookupFuncName((List *) handler->arg, 0, NULL, false);

	/* Insist that the return type be fdw_handler */
	if (get_func_rettype(funcOid) != FDW_HANDLEROID)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("function %s must return type %s",
						NameListToString((List *) handler->arg), "fdw_handler")));

	return funcOid;
}
/*
 * Convert a validator function name passed from the parser to an Oid.
 *
 * Returns InvalidOid when no validator was specified (or VALIDATOR NONE).
 * Errors out if the function does not exist with the expected signature.
 */
static Oid
lookup_fdw_validator_func(DefElem *validator)
{
	Oid			argtypes[2];

	/* No VALIDATOR clause, or an explicit NONE: record no validator */
	if (validator == NULL || validator->arg == NULL)
		return InvalidOid;

	/*
	 * Validators take (text[], oid).  Their return value is ignored, so we
	 * don't check the return type here.
	 */
	argtypes[0] = TEXTARRAYOID;
	argtypes[1] = OIDOID;
	return LookupFuncName((List *) validator->arg, 2, argtypes, false);
}
/*
 * Process function options of CREATE/ALTER FDW.
 *
 * On return, *handler_given / *validator_given report whether the
 * corresponding clause appeared at all, and *fdwhandler / *fdwvalidator
 * hold the resolved function OIDs (InvalidOid when absent or NONE).
 */
static void
parse_func_options(ParseState *pstate, List *func_options,
				   bool *handler_given, Oid *fdwhandler,
				   bool *validator_given, Oid *fdwvalidator)
{
	ListCell   *lc;

	/* Assume nothing was specified until we see the clauses */
	*handler_given = false;
	*validator_given = false;
	*fdwhandler = InvalidOid;
	*fdwvalidator = InvalidOid;

	foreach(lc, func_options)
	{
		DefElem    *defel = (DefElem *) lfirst(lc);

		if (strcmp(defel->defname, "handler") == 0)
		{
			/* Reject a duplicate HANDLER clause */
			if (*handler_given)
				errorConflictingDefElem(defel, pstate);
			*handler_given = true;
			*fdwhandler = lookup_fdw_handler_func(defel);
		}
		else if (strcmp(defel->defname, "validator") == 0)
		{
			/* Reject a duplicate VALIDATOR clause */
			if (*validator_given)
				errorConflictingDefElem(defel, pstate);
			*validator_given = true;
			*fdwvalidator = lookup_fdw_validator_func(defel);
		}
		else
			elog(ERROR, "option \"%s\" not recognized",
				 defel->defname);
	}
}
/*
 * Create a foreign-data wrapper
 *
 * Implements CREATE FOREIGN DATA WRAPPER.  Only superusers may do this;
 * the effective user becomes the owner.  Returns the address of the new
 * pg_foreign_data_wrapper entry.
 */
ObjectAddress
CreateForeignDataWrapper(ParseState *pstate, CreateFdwStmt *stmt)
{
	Relation	rel;
	Datum		values[Natts_pg_foreign_data_wrapper];
	bool		nulls[Natts_pg_foreign_data_wrapper];
	HeapTuple	tuple;
	Oid			fdwId;
	bool		handler_given;
	bool		validator_given;
	Oid			fdwhandler;
	Oid			fdwvalidator;
	Datum		fdwoptions;
	Oid			ownerId;
	ObjectAddress myself;
	ObjectAddress referenced;

	rel = table_open(ForeignDataWrapperRelationId, RowExclusiveLock);

	/* Must be superuser */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to create foreign-data wrapper \"%s\"",
						stmt->fdwname),
				 errhint("Must be superuser to create a foreign-data wrapper.")));

	/* For now the owner cannot be specified on create. Use effective user ID. */
	ownerId = GetUserId();

	/*
	 * Check that there is no other foreign-data wrapper by this name.
	 */
	if (GetForeignDataWrapperByName(stmt->fdwname, true) != NULL)
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
				 errmsg("foreign-data wrapper \"%s\" already exists",
						stmt->fdwname)));

	/*
	 * Insert tuple into pg_foreign_data_wrapper.
	 */
	memset(values, 0, sizeof(values));
	memset(nulls, false, sizeof(nulls));

	fdwId = GetNewOidWithIndex(rel, ForeignDataWrapperOidIndexId,
							   Anum_pg_foreign_data_wrapper_oid);
	values[Anum_pg_foreign_data_wrapper_oid - 1] = ObjectIdGetDatum(fdwId);
	values[Anum_pg_foreign_data_wrapper_fdwname - 1] =
		DirectFunctionCall1(namein, CStringGetDatum(stmt->fdwname));
	values[Anum_pg_foreign_data_wrapper_fdwowner - 1] = ObjectIdGetDatum(ownerId);

	/* Lookup handler and validator functions, if given */
	parse_func_options(pstate, stmt->func_options,
					   &handler_given, &fdwhandler,
					   &validator_given, &fdwvalidator);

	values[Anum_pg_foreign_data_wrapper_fdwhandler - 1] = ObjectIdGetDatum(fdwhandler);
	values[Anum_pg_foreign_data_wrapper_fdwvalidator - 1] = ObjectIdGetDatum(fdwvalidator);

	/* A new wrapper starts with no explicit privileges granted */
	nulls[Anum_pg_foreign_data_wrapper_fdwacl - 1] = true;

	/* Validate (against fdwvalidator, if any) and store the options */
	fdwoptions = transformGenericOptions(ForeignDataWrapperRelationId,
										 PointerGetDatum(NULL),
										 stmt->options,
										 fdwvalidator);

	if (DatumGetPointer(fdwoptions) != NULL)
		values[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = fdwoptions;
	else
		nulls[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = true;

	tuple = heap_form_tuple(rel->rd_att, values, nulls);

	CatalogTupleInsert(rel, tuple);

	heap_freetuple(tuple);

	/* record dependencies */
	myself.classId = ForeignDataWrapperRelationId;
	myself.objectId = fdwId;
	myself.objectSubId = 0;

	if (OidIsValid(fdwhandler))
	{
		referenced.classId = ProcedureRelationId;
		referenced.objectId = fdwhandler;
		referenced.objectSubId = 0;
		recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
	}

	if (OidIsValid(fdwvalidator))
	{
		referenced.classId = ProcedureRelationId;
		referenced.objectId = fdwvalidator;
		referenced.objectSubId = 0;
		recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
	}

	recordDependencyOnOwner(ForeignDataWrapperRelationId, fdwId, ownerId);

	/* dependency on extension */
	recordDependencyOnCurrentExtension(&myself, false);

	/* Post creation hook for new foreign data wrapper */
	InvokeObjectPostCreateHook(ForeignDataWrapperRelationId, fdwId, 0);

	table_close(rel, RowExclusiveLock);

	return myself;
}
/*
 * Alter foreign-data wrapper
 *
 * Implements ALTER FOREIGN DATA WRAPPER.  Only superusers may do this.
 * Handler/validator changes rewrite the wrapper's function dependencies;
 * option changes are validated against the (possibly new) validator.
 */
ObjectAddress
AlterForeignDataWrapper(ParseState *pstate, AlterFdwStmt *stmt)
{
	Relation	rel;
	HeapTuple	tp;
	Form_pg_foreign_data_wrapper fdwForm;
	Datum		repl_val[Natts_pg_foreign_data_wrapper];
	bool		repl_null[Natts_pg_foreign_data_wrapper];
	bool		repl_repl[Natts_pg_foreign_data_wrapper];
	Oid			fdwId;
	bool		isnull;
	Datum		datum;
	bool		handler_given;
	bool		validator_given;
	Oid			fdwhandler;
	Oid			fdwvalidator;
	ObjectAddress myself;

	rel = table_open(ForeignDataWrapperRelationId, RowExclusiveLock);

	/* Must be superuser */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to alter foreign-data wrapper \"%s\"",
						stmt->fdwname),
				 errhint("Must be superuser to alter a foreign-data wrapper.")));

	tp = SearchSysCacheCopy1(FOREIGNDATAWRAPPERNAME,
							 CStringGetDatum(stmt->fdwname));

	if (!HeapTupleIsValid(tp))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("foreign-data wrapper \"%s\" does not exist", stmt->fdwname)));

	fdwForm = (Form_pg_foreign_data_wrapper) GETSTRUCT(tp);
	fdwId = fdwForm->oid;

	memset(repl_val, 0, sizeof(repl_val));
	memset(repl_null, false, sizeof(repl_null));
	memset(repl_repl, false, sizeof(repl_repl));

	parse_func_options(pstate, stmt->func_options,
					   &handler_given, &fdwhandler,
					   &validator_given, &fdwvalidator);

	if (handler_given)
	{
		repl_val[Anum_pg_foreign_data_wrapper_fdwhandler - 1] = ObjectIdGetDatum(fdwhandler);
		repl_repl[Anum_pg_foreign_data_wrapper_fdwhandler - 1] = true;

		/*
		 * It could be that the behavior of accessing foreign table changes
		 * with the new handler.  Warn about this.
		 */
		ereport(WARNING,
				(errmsg("changing the foreign-data wrapper handler can change behavior of existing foreign tables")));
	}

	if (validator_given)
	{
		repl_val[Anum_pg_foreign_data_wrapper_fdwvalidator - 1] = ObjectIdGetDatum(fdwvalidator);
		repl_repl[Anum_pg_foreign_data_wrapper_fdwvalidator - 1] = true;

		/*
		 * It could be that existing options for the FDW or dependent SERVER,
		 * USER MAPPING or FOREIGN TABLE objects are no longer valid according
		 * to the new validator.  Warn about this.
		 */
		if (OidIsValid(fdwvalidator))
			ereport(WARNING,
					(errmsg("changing the foreign-data wrapper validator can cause "
							"the options for dependent objects to become invalid")));
	}
	else
	{
		/*
		 * Validator is not changed, but we need it for validating options.
		 */
		fdwvalidator = fdwForm->fdwvalidator;
	}

	/*
	 * If options specified, validate and update.
	 */
	if (stmt->options)
	{
		/* Extract the current options */
		datum = SysCacheGetAttr(FOREIGNDATAWRAPPEROID,
								tp,
								Anum_pg_foreign_data_wrapper_fdwoptions,
								&isnull);
		if (isnull)
			datum = PointerGetDatum(NULL);

		/* Transform the options */
		datum = transformGenericOptions(ForeignDataWrapperRelationId,
										datum,
										stmt->options,
										fdwvalidator);

		if (DatumGetPointer(datum) != NULL)
			repl_val[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = datum;
		else
			repl_null[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = true;

		repl_repl[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = true;
	}

	/* Everything looks good - update the tuple */
	tp = heap_modify_tuple(tp, RelationGetDescr(rel),
						   repl_val, repl_null, repl_repl);

	CatalogTupleUpdate(rel, &tp->t_self, tp);

	heap_freetuple(tp);

	ObjectAddressSet(myself, ForeignDataWrapperRelationId, fdwId);

	/* Update function dependencies if we changed them */
	if (handler_given || validator_given)
	{
		ObjectAddress referenced;

		/*
		 * Flush all existing dependency records of this FDW on functions; we
		 * assume there can be none other than the ones we are fixing.
		 */
		deleteDependencyRecordsForClass(ForeignDataWrapperRelationId,
										fdwId,
										ProcedureRelationId,
										DEPENDENCY_NORMAL);

		/* And build new ones. */

		if (OidIsValid(fdwhandler))
		{
			referenced.classId = ProcedureRelationId;
			referenced.objectId = fdwhandler;
			referenced.objectSubId = 0;
			recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
		}

		if (OidIsValid(fdwvalidator))
		{
			referenced.classId = ProcedureRelationId;
			referenced.objectId = fdwvalidator;
			referenced.objectSubId = 0;
			recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
		}
	}

	InvokeObjectPostAlterHook(ForeignDataWrapperRelationId, fdwId, 0);

	table_close(rel, RowExclusiveLock);

	return myself;
}
/*
 * Create a foreign server
 *
 * Implements CREATE SERVER.  The caller must have USAGE on the underlying
 * foreign-data wrapper; the effective user becomes the owner.  With IF NOT
 * EXISTS, an already-existing server yields a NOTICE and
 * InvalidObjectAddress instead of an error.
 */
ObjectAddress
CreateForeignServer(CreateForeignServerStmt *stmt)
{
	Relation	rel;
	Datum		srvoptions;
	Datum		values[Natts_pg_foreign_server];
	bool		nulls[Natts_pg_foreign_server];
	HeapTuple	tuple;
	Oid			srvId;
	Oid			ownerId;
	AclResult	aclresult;
	ObjectAddress myself;
	ObjectAddress referenced;
	ForeignDataWrapper *fdw;

	rel = table_open(ForeignServerRelationId, RowExclusiveLock);

	/* For now the owner cannot be specified on create. Use effective user ID. */
	ownerId = GetUserId();

	/*
	 * Check that there is no other foreign server by this name.  If there is
	 * one, do nothing if IF NOT EXISTS was specified.
	 */
	srvId = get_foreign_server_oid(stmt->servername, true);
	if (OidIsValid(srvId))
	{
		if (stmt->if_not_exists)
		{
			/*
			 * If we are in an extension script, insist that the pre-existing
			 * object be a member of the extension, to avoid security risks.
			 */
			ObjectAddressSet(myself, ForeignServerRelationId, srvId);
			checkMembershipInCurrentExtension(&myself);

			/* OK to skip */
			ereport(NOTICE,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("server \"%s\" already exists, skipping",
							stmt->servername)));
			table_close(rel, RowExclusiveLock);
			return InvalidObjectAddress;
		}
		else
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("server \"%s\" already exists",
							stmt->servername)));
	}

	/*
	 * Check that the FDW exists and that we have USAGE on it. Also get the
	 * actual FDW for option validation etc.
	 */
	fdw = GetForeignDataWrapperByName(stmt->fdwname, false);

	aclresult = object_aclcheck(ForeignDataWrapperRelationId, fdw->fdwid, ownerId, ACL_USAGE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, OBJECT_FDW, fdw->fdwname);

	/*
	 * Insert tuple into pg_foreign_server.
	 */
	memset(values, 0, sizeof(values));
	memset(nulls, false, sizeof(nulls));

	srvId = GetNewOidWithIndex(rel, ForeignServerOidIndexId,
							   Anum_pg_foreign_server_oid);
	values[Anum_pg_foreign_server_oid - 1] = ObjectIdGetDatum(srvId);
	values[Anum_pg_foreign_server_srvname - 1] =
		DirectFunctionCall1(namein, CStringGetDatum(stmt->servername));
	values[Anum_pg_foreign_server_srvowner - 1] = ObjectIdGetDatum(ownerId);
	values[Anum_pg_foreign_server_srvfdw - 1] = ObjectIdGetDatum(fdw->fdwid);

	/* Add server type if supplied */
	if (stmt->servertype)
		values[Anum_pg_foreign_server_srvtype - 1] =
			CStringGetTextDatum(stmt->servertype);
	else
		nulls[Anum_pg_foreign_server_srvtype - 1] = true;

	/* Add server version if supplied */
	if (stmt->version)
		values[Anum_pg_foreign_server_srvversion - 1] =
			CStringGetTextDatum(stmt->version);
	else
		nulls[Anum_pg_foreign_server_srvversion - 1] = true;

	/* Start with a blank acl */
	nulls[Anum_pg_foreign_server_srvacl - 1] = true;

	/* Add server options */
	srvoptions = transformGenericOptions(ForeignServerRelationId,
										 PointerGetDatum(NULL),
										 stmt->options,
										 fdw->fdwvalidator);

	if (DatumGetPointer(srvoptions) != NULL)
		values[Anum_pg_foreign_server_srvoptions - 1] = srvoptions;
	else
		nulls[Anum_pg_foreign_server_srvoptions - 1] = true;

	tuple = heap_form_tuple(rel->rd_att, values, nulls);

	CatalogTupleInsert(rel, tuple);

	heap_freetuple(tuple);

	/* record dependencies */
	myself.classId = ForeignServerRelationId;
	myself.objectId = srvId;
	myself.objectSubId = 0;

	referenced.classId = ForeignDataWrapperRelationId;
	referenced.objectId = fdw->fdwid;
	referenced.objectSubId = 0;
	recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);

	recordDependencyOnOwner(ForeignServerRelationId, srvId, ownerId);

	/* dependency on extension */
	recordDependencyOnCurrentExtension(&myself, false);

	/* Post creation hook for new foreign server */
	InvokeObjectPostCreateHook(ForeignServerRelationId, srvId, 0);

	table_close(rel, RowExclusiveLock);

	return myself;
}
/*
 * Alter foreign server
 *
 * Implements ALTER SERVER.  Only the server's owner (or a superuser) may
 * do this.  VERSION and generic OPTIONS can be changed; options are
 * validated against the wrapper's validator.
 */
ObjectAddress
AlterForeignServer(AlterForeignServerStmt *stmt)
{
	Relation	rel;
	HeapTuple	tp;
	Datum		repl_val[Natts_pg_foreign_server];
	bool		repl_null[Natts_pg_foreign_server];
	bool		repl_repl[Natts_pg_foreign_server];
	Oid			srvId;
	Form_pg_foreign_server srvForm;
	ObjectAddress address;

	rel = table_open(ForeignServerRelationId, RowExclusiveLock);

	tp = SearchSysCacheCopy1(FOREIGNSERVERNAME,
							 CStringGetDatum(stmt->servername));

	if (!HeapTupleIsValid(tp))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("server \"%s\" does not exist", stmt->servername)));

	srvForm = (Form_pg_foreign_server) GETSTRUCT(tp);
	srvId = srvForm->oid;

	/*
	 * Only owner or a superuser can ALTER a SERVER.
	 */
	if (!object_ownercheck(ForeignServerRelationId, srvId, GetUserId()))
		aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER,
					   stmt->servername);

	memset(repl_val, 0, sizeof(repl_val));
	memset(repl_null, false, sizeof(repl_null));
	memset(repl_repl, false, sizeof(repl_repl));

	if (stmt->has_version)
	{
		/*
		 * Change the server VERSION string.  A null stmt->version means
		 * VERSION NULL, i.e. clear the stored value.
		 */
		if (stmt->version)
			repl_val[Anum_pg_foreign_server_srvversion - 1] =
				CStringGetTextDatum(stmt->version);
		else
			repl_null[Anum_pg_foreign_server_srvversion - 1] = true;

		repl_repl[Anum_pg_foreign_server_srvversion - 1] = true;
	}

	if (stmt->options)
	{
		ForeignDataWrapper *fdw = GetForeignDataWrapper(srvForm->srvfdw);
		Datum		datum;
		bool		isnull;

		/* Extract the current srvoptions */
		datum = SysCacheGetAttr(FOREIGNSERVEROID,
								tp,
								Anum_pg_foreign_server_srvoptions,
								&isnull);
		if (isnull)
			datum = PointerGetDatum(NULL);

		/* Prepare the options array */
		datum = transformGenericOptions(ForeignServerRelationId,
										datum,
										stmt->options,
										fdw->fdwvalidator);

		if (DatumGetPointer(datum) != NULL)
			repl_val[Anum_pg_foreign_server_srvoptions - 1] = datum;
		else
			repl_null[Anum_pg_foreign_server_srvoptions - 1] = true;

		repl_repl[Anum_pg_foreign_server_srvoptions - 1] = true;
	}

	/* Everything looks good - update the tuple */
	tp = heap_modify_tuple(tp, RelationGetDescr(rel),
						   repl_val, repl_null, repl_repl);

	CatalogTupleUpdate(rel, &tp->t_self, tp);

	InvokeObjectPostAlterHook(ForeignServerRelationId, srvId, 0);

	ObjectAddressSet(address, ForeignServerRelationId, srvId);

	heap_freetuple(tp);

	table_close(rel, RowExclusiveLock);

	return address;
}
/*
 * Common routine to check permission for user-mapping-related DDL
 * commands.  We allow server owners to operate on any mapping, and
 * users to operate on their own mapping.
 *
 * Errors out via aclcheck_error() when the check fails.
 */
static void
user_mapping_ddl_aclcheck(Oid umuserid, Oid serverid, const char *servername)
{
	Oid			roleid = GetUserId();

	/* The server owner may manipulate any mapping on the server */
	if (object_ownercheck(ForeignServerRelationId, serverid, roleid))
		return;

	/* Otherwise, only one's own mapping, and only with USAGE on the server */
	if (umuserid != roleid)
		aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER,
					   servername);
	else
	{
		AclResult	perm;

		perm = object_aclcheck(ForeignServerRelationId, serverid, roleid, ACL_USAGE);
		if (perm != ACLCHECK_OK)
			aclcheck_error(perm, OBJECT_FOREIGN_SERVER, servername);
	}
}
/*
 * Create user mapping
 *
 * Implements CREATE USER MAPPING.  The mapped role may be PUBLIC.  With
 * IF NOT EXISTS, an already-existing mapping yields a NOTICE and
 * InvalidObjectAddress instead of an error.
 */
ObjectAddress
CreateUserMapping(CreateUserMappingStmt *stmt)
{
	Relation	rel;
	Datum		useoptions;
	Datum		values[Natts_pg_user_mapping];
	bool		nulls[Natts_pg_user_mapping];
	HeapTuple	tuple;
	Oid			useId;
	Oid			umId;
	ObjectAddress myself;
	ObjectAddress referenced;
	ForeignServer *srv;
	ForeignDataWrapper *fdw;
	RoleSpec   *role = (RoleSpec *) stmt->user;

	rel = table_open(UserMappingRelationId, RowExclusiveLock);

	/* Resolve the mapped role; PUBLIC maps to a reserved pseudo-ID */
	if (role->roletype == ROLESPEC_PUBLIC)
		useId = ACL_ID_PUBLIC;
	else
		useId = get_rolespec_oid(stmt->user, false);

	/* Check that the server exists. */
	srv = GetForeignServerByName(stmt->servername, false);

	user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername);

	/*
	 * Check that the user mapping is unique within server.
	 */
	umId = GetSysCacheOid2(USERMAPPINGUSERSERVER, Anum_pg_user_mapping_oid,
						   ObjectIdGetDatum(useId),
						   ObjectIdGetDatum(srv->serverid));

	if (OidIsValid(umId))
	{
		if (stmt->if_not_exists)
		{
			/*
			 * Since user mappings aren't members of extensions (see comments
			 * below), no need for checkMembershipInCurrentExtension here.
			 */
			ereport(NOTICE,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("user mapping for \"%s\" already exists for server \"%s\", skipping",
							MappingUserName(useId),
							stmt->servername)));

			table_close(rel, RowExclusiveLock);
			return InvalidObjectAddress;
		}
		else
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("user mapping for \"%s\" already exists for server \"%s\"",
							MappingUserName(useId),
							stmt->servername)));
	}

	/* Need the wrapper for its option validator */
	fdw = GetForeignDataWrapper(srv->fdwid);

	/*
	 * Insert tuple into pg_user_mapping.
	 */
	memset(values, 0, sizeof(values));
	memset(nulls, false, sizeof(nulls));

	umId = GetNewOidWithIndex(rel, UserMappingOidIndexId,
							  Anum_pg_user_mapping_oid);
	values[Anum_pg_user_mapping_oid - 1] = ObjectIdGetDatum(umId);
	values[Anum_pg_user_mapping_umuser - 1] = ObjectIdGetDatum(useId);
	values[Anum_pg_user_mapping_umserver - 1] = ObjectIdGetDatum(srv->serverid);

	/* Add user options */
	useoptions = transformGenericOptions(UserMappingRelationId,
										 PointerGetDatum(NULL),
										 stmt->options,
										 fdw->fdwvalidator);

	if (DatumGetPointer(useoptions) != NULL)
		values[Anum_pg_user_mapping_umoptions - 1] = useoptions;
	else
		nulls[Anum_pg_user_mapping_umoptions - 1] = true;

	tuple = heap_form_tuple(rel->rd_att, values, nulls);

	CatalogTupleInsert(rel, tuple);

	heap_freetuple(tuple);

	/* Add dependency on the server */
	myself.classId = UserMappingRelationId;
	myself.objectId = umId;
	myself.objectSubId = 0;

	referenced.classId = ForeignServerRelationId;
	referenced.objectId = srv->serverid;
	referenced.objectSubId = 0;
	recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);

	if (OidIsValid(useId))
	{
		/* Record the mapped user dependency */
		recordDependencyOnOwner(UserMappingRelationId, umId, useId);
	}

	/*
	 * Perhaps someday there should be a recordDependencyOnCurrentExtension
	 * call here; but since roles aren't members of extensions, it seems like
	 * user mappings shouldn't be either.  Note that the grammar and pg_dump
	 * would need to be extended too if we change this.
	 */

	/* Post creation hook for new user mapping */
	InvokeObjectPostCreateHook(UserMappingRelationId, umId, 0);

	table_close(rel, RowExclusiveLock);

	return myself;
}
/*
 * Alter user mapping
 *
 * Implements ALTER USER MAPPING.  Only the mapping's options may be
 * changed; they are validated against the wrapper's validator.
 */
ObjectAddress
AlterUserMapping(AlterUserMappingStmt *stmt)
{
	Relation	rel;
	HeapTuple	tp;
	Datum		repl_val[Natts_pg_user_mapping];
	bool		repl_null[Natts_pg_user_mapping];
	bool		repl_repl[Natts_pg_user_mapping];
	Oid			useId;
	Oid			umId;
	ForeignServer *srv;
	ObjectAddress address;
	RoleSpec   *role = (RoleSpec *) stmt->user;

	rel = table_open(UserMappingRelationId, RowExclusiveLock);

	/* Resolve the mapped role; PUBLIC maps to a reserved pseudo-ID */
	if (role->roletype == ROLESPEC_PUBLIC)
		useId = ACL_ID_PUBLIC;
	else
		useId = get_rolespec_oid(stmt->user, false);

	srv = GetForeignServerByName(stmt->servername, false);

	umId = GetSysCacheOid2(USERMAPPINGUSERSERVER, Anum_pg_user_mapping_oid,
						   ObjectIdGetDatum(useId),
						   ObjectIdGetDatum(srv->serverid));
	if (!OidIsValid(umId))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("user mapping for \"%s\" does not exist for server \"%s\"",
						MappingUserName(useId), stmt->servername)));

	user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername);

	tp = SearchSysCacheCopy1(USERMAPPINGOID, ObjectIdGetDatum(umId));

	if (!HeapTupleIsValid(tp))
		elog(ERROR, "cache lookup failed for user mapping %u", umId);

	memset(repl_val, 0, sizeof(repl_val));
	memset(repl_null, false, sizeof(repl_null));
	memset(repl_repl, false, sizeof(repl_repl));

	if (stmt->options)
	{
		ForeignDataWrapper *fdw;
		Datum		datum;
		bool		isnull;

		/*
		 * Process the options.
		 */

		fdw = GetForeignDataWrapper(srv->fdwid);

		datum = SysCacheGetAttr(USERMAPPINGUSERSERVER,
								tp,
								Anum_pg_user_mapping_umoptions,
								&isnull);
		if (isnull)
			datum = PointerGetDatum(NULL);

		/* Prepare the options array */
		datum = transformGenericOptions(UserMappingRelationId,
										datum,
										stmt->options,
										fdw->fdwvalidator);

		if (DatumGetPointer(datum) != NULL)
			repl_val[Anum_pg_user_mapping_umoptions - 1] = datum;
		else
			repl_null[Anum_pg_user_mapping_umoptions - 1] = true;

		repl_repl[Anum_pg_user_mapping_umoptions - 1] = true;
	}

	/* Everything looks good - update the tuple */
	tp = heap_modify_tuple(tp, RelationGetDescr(rel),
						   repl_val, repl_null, repl_repl);

	CatalogTupleUpdate(rel, &tp->t_self, tp);

	InvokeObjectPostAlterHook(UserMappingRelationId,
							  umId, 0);

	ObjectAddressSet(address, UserMappingRelationId, umId);

	heap_freetuple(tp);

	table_close(rel, RowExclusiveLock);

	return address;
}
/*
 * Drop user mapping
 *
 * Implements DROP USER MAPPING.  Returns the OID of the dropped mapping,
 * or InvalidOid when IF EXISTS was given and the role, server, or mapping
 * was not found.
 *
 * Fix: the "role does not exist, skipping" NOTICE was emitted with elog(),
 * which bypasses message translation; use ereport() like the function's
 * other user-facing NOTICEs.  Message text is unchanged.
 */
Oid
RemoveUserMapping(DropUserMappingStmt *stmt)
{
	ObjectAddress object;
	Oid			useId;
	Oid			umId;
	ForeignServer *srv;
	RoleSpec   *role = (RoleSpec *) stmt->user;

	/* Resolve the mapped role; PUBLIC maps to a reserved pseudo-ID */
	if (role->roletype == ROLESPEC_PUBLIC)
		useId = ACL_ID_PUBLIC;
	else
	{
		useId = get_rolespec_oid(stmt->user, stmt->missing_ok);
		if (!OidIsValid(useId))
		{
			/*
			 * IF EXISTS specified, role not found and not public. Notice this
			 * and leave.
			 */
			ereport(NOTICE,
					(errmsg("role \"%s\" does not exist, skipping",
							role->rolename)));
			return InvalidOid;
		}
	}

	srv = GetForeignServerByName(stmt->servername, true);

	if (!srv)
	{
		if (!stmt->missing_ok)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("server \"%s\" does not exist",
							stmt->servername)));
		/* IF EXISTS, just note it */
		ereport(NOTICE,
				(errmsg("server \"%s\" does not exist, skipping",
						stmt->servername)));
		return InvalidOid;
	}

	umId = GetSysCacheOid2(USERMAPPINGUSERSERVER, Anum_pg_user_mapping_oid,
						   ObjectIdGetDatum(useId),
						   ObjectIdGetDatum(srv->serverid));

	if (!OidIsValid(umId))
	{
		if (!stmt->missing_ok)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("user mapping for \"%s\" does not exist for server \"%s\"",
							MappingUserName(useId), stmt->servername)));

		/* IF EXISTS specified, just note it */
		ereport(NOTICE,
				(errmsg("user mapping for \"%s\" does not exist for server \"%s\", skipping",
						MappingUserName(useId), stmt->servername)));
		return InvalidOid;
	}

	user_mapping_ddl_aclcheck(useId, srv->serverid, srv->servername);

	/*
	 * Do the deletion
	 */
	object.classId = UserMappingRelationId;
	object.objectId = umId;
	object.objectSubId = 0;

	performDeletion(&object, DROP_CASCADE, 0);

	return umId;
}
/*
 * Create a foreign table
 * call after DefineRelation().
 *
 * "relid" is the OID of the pg_class entry DefineRelation() just made.
 * This adds the pg_foreign_table row tying the relation to its server,
 * validates the table's generic options, and records the dependency on
 * the server.  Requires USAGE on the server.
 */
void
CreateForeignTable(CreateForeignTableStmt *stmt, Oid relid)
{
	Relation	ftrel;
	Datum		ftoptions;
	Datum		values[Natts_pg_foreign_table];
	bool		nulls[Natts_pg_foreign_table];
	HeapTuple	tuple;
	AclResult	aclresult;
	ObjectAddress myself;
	ObjectAddress referenced;
	Oid			ownerId;
	ForeignDataWrapper *fdw;
	ForeignServer *server;

	/*
	 * Advance command counter to ensure the pg_attribute tuple is visible;
	 * the tuple might be updated to add constraints in previous step.
	 */
	CommandCounterIncrement();

	ftrel = table_open(ForeignTableRelationId, RowExclusiveLock);

	/*
	 * For now the owner cannot be specified on create. Use effective user ID.
	 */
	ownerId = GetUserId();

	/*
	 * Check that the foreign server exists and that we have USAGE on it. Also
	 * get the actual FDW for option validation etc.
	 */
	server = GetForeignServerByName(stmt->servername, false);
	aclresult = object_aclcheck(ForeignServerRelationId, server->serverid, ownerId, ACL_USAGE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, server->servername);

	fdw = GetForeignDataWrapper(server->fdwid);

	/*
	 * Insert tuple into pg_foreign_table.
	 */
	memset(values, 0, sizeof(values));
	memset(nulls, false, sizeof(nulls));

	values[Anum_pg_foreign_table_ftrelid - 1] = ObjectIdGetDatum(relid);
	values[Anum_pg_foreign_table_ftserver - 1] = ObjectIdGetDatum(server->serverid);
	/* Add table generic options */
	ftoptions = transformGenericOptions(ForeignTableRelationId,
										PointerGetDatum(NULL),
										stmt->options,
										fdw->fdwvalidator);

	if (DatumGetPointer(ftoptions) != NULL)
		values[Anum_pg_foreign_table_ftoptions - 1] = ftoptions;
	else
		nulls[Anum_pg_foreign_table_ftoptions - 1] = true;

	tuple = heap_form_tuple(ftrel->rd_att, values, nulls);

	CatalogTupleInsert(ftrel, tuple);

	heap_freetuple(tuple);

	/* Add pg_class dependency on the server */
	myself.classId = RelationRelationId;
	myself.objectId = relid;
	myself.objectSubId = 0;

	referenced.classId = ForeignServerRelationId;
	referenced.objectId = server->serverid;
	referenced.objectSubId = 0;
	recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);

	table_close(ftrel, RowExclusiveLock);
}
/*
 * Import a foreign schema
 *
 * Asks the server's FDW to generate a list of CREATE FOREIGN TABLE command
 * strings for the remote schema, then parses and executes each one with the
 * target schema forced to the local schema named in the statement.
 */
void
ImportForeignSchema(ImportForeignSchemaStmt *stmt)
{
	ForeignServer *server;
	ForeignDataWrapper *fdw;
	FdwRoutine *fdw_routine;
	AclResult	aclresult;
	List	   *cmd_list;
	ListCell   *lc;

	/* Check that the foreign server exists and that we have USAGE on it */
	server = GetForeignServerByName(stmt->server_name, false);
	aclresult = object_aclcheck(ForeignServerRelationId, server->serverid, GetUserId(), ACL_USAGE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, server->servername);

	/* Check that the schema exists and we have CREATE permissions on it */
	(void) LookupCreationNamespace(stmt->local_schema);

	/* Get the FDW and check it supports IMPORT */
	fdw = GetForeignDataWrapper(server->fdwid);
	if (!OidIsValid(fdw->fdwhandler))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("foreign-data wrapper \"%s\" has no handler",
						fdw->fdwname)));
	fdw_routine = GetFdwRoutine(fdw->fdwhandler);
	if (fdw_routine->ImportForeignSchema == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FDW_NO_SCHEMAS),
				 errmsg("foreign-data wrapper \"%s\" does not support IMPORT FOREIGN SCHEMA",
						fdw->fdwname)));

	/* Call FDW to get a list of commands */
	cmd_list = fdw_routine->ImportForeignSchema(stmt, server->serverid);

	/* Parse and execute each command */
	foreach(lc, cmd_list)
	{
		char	   *cmd = (char *) lfirst(lc);
		import_error_callback_arg callback_arg;
		ErrorContextCallback sqlerrcontext;
		List	   *raw_parsetree_list;
		ListCell   *lc2;

		/*
		 * Setup error traceback support for ereport(). This is so that any
		 * error in the generated SQL will be displayed nicely.
		 */
		callback_arg.tablename = NULL;	/* not known yet */
		callback_arg.cmd = cmd;
		sqlerrcontext.callback = import_error_callback;
		sqlerrcontext.arg = &callback_arg;
		sqlerrcontext.previous = error_context_stack;
		error_context_stack = &sqlerrcontext;

		/*
		 * Parse the SQL string into a list of raw parse trees.
		 */
		raw_parsetree_list = pg_parse_query(cmd);

		/*
		 * Process each parse tree (we allow the FDW to put more than one
		 * command per string, though this isn't really advised).
		 */
		foreach(lc2, raw_parsetree_list)
		{
			RawStmt    *rs = lfirst_node(RawStmt, lc2);
			CreateForeignTableStmt *cstmt = (CreateForeignTableStmt *) rs->stmt;
			PlannedStmt *pstmt;

			/*
			 * Because we only allow CreateForeignTableStmt, we can skip parse
			 * analysis, rewrite, and planning steps here.
			 */
			if (!IsA(cstmt, CreateForeignTableStmt))
				elog(ERROR,
					 "foreign-data wrapper \"%s\" returned incorrect statement type %d",
					 fdw->fdwname, (int) nodeTag(cstmt));

			/* Ignore commands for tables excluded by filter options */
			if (!IsImportableForeignTable(cstmt->base.relation->relname, stmt))
				continue;

			/* Enable reporting of current table's name on error */
			callback_arg.tablename = cstmt->base.relation->relname;

			/* Ensure creation schema is the one given in IMPORT statement */
			cstmt->base.relation->schemaname = pstrdup(stmt->local_schema);

			/* No planning needed, just make a wrapper PlannedStmt */
			pstmt = makeNode(PlannedStmt);
			pstmt->commandType = CMD_UTILITY;
			pstmt->canSetTag = false;
			pstmt->utilityStmt = (Node *) cstmt;
			pstmt->stmt_location = rs->stmt_location;
			pstmt->stmt_len = rs->stmt_len;
			pstmt->planOrigin = PLAN_STMT_INTERNAL;

			/* Execute statement */
			ProcessUtility(pstmt, cmd, false,
						   PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
						   None_Receiver, NULL);

			/* Be sure to advance the command counter between subcommands */
			CommandCounterIncrement();

			callback_arg.tablename = NULL;	/* done with this table */
		}

		/* Pop our error-context frame before the next command */
		error_context_stack = sqlerrcontext.previous;
	}
}
/*
 * error context callback to let us supply the failing SQL statement's text
 *
 * 'arg' points at the import_error_callback_arg set up by
 * ImportForeignSchema for the command currently being executed.
 */
static void
import_error_callback(void *arg)
{
	import_error_callback_arg *callback_arg = (import_error_callback_arg *) arg;
	int			syntaxerrposition;

	/* If it's a syntax error, convert to internal syntax error report */
	syntaxerrposition = geterrposition();
	if (syntaxerrposition > 0)
	{
		/* Re-point the cursor at the FDW-generated command text */
		errposition(0);
		internalerrposition(syntaxerrposition);
		internalerrquery(callback_arg->cmd);
	}
	if (callback_arg->tablename)
		errcontext("importing foreign table \"%s\"",
				   callback_arg->tablename);
}
from social.exceptions import NotAllowedToDisconnect
def allowed_to_disconnect(strategy, user, name, user_storage,
                          association_id=None, *args, **kwargs):
    """Abort the disconnect pipeline unless the storage permits it.

    Raises NotAllowedToDisconnect when the storage backend refuses to
    remove the association; otherwise does nothing.
    """
    permitted = user_storage.allowed_to_disconnect(user, name, association_id)
    if not permitted:
        raise NotAllowedToDisconnect()
def get_entries(strategy, user, name, user_storage, association_id=None,
                *args, **kwargs):
    """Fetch the social-auth associations that the pipeline should act on.

    Returns a dict with a single 'entries' key so later pipeline steps
    receive the entries as a keyword argument.
    """
    entries = user_storage.get_social_auth_for_user(user, name, association_id)
    return {'entries': entries}
def revoke_tokens(strategy, entries, *args, **kwargs):
    """Revoke the provider-side access token of each entry, when enabled.

    Only acts when the REVOKE_TOKENS_ON_DISCONNECT setting is truthy;
    entries without an 'access_token' in their extra_data are skipped.
    """
    # Renamed from 'revoke_tokens': the original local shadowed this
    # function's own name inside its body.
    should_revoke = strategy.setting('REVOKE_TOKENS_ON_DISCONNECT', False)
    if not should_revoke:
        return
    for entry in entries:
        if 'access_token' in entry.extra_data:
            backend = entry.get_backend(strategy)(strategy)
            backend.revoke_token(entry.extra_data['access_token'],
                                 entry.uid)
def disconnect(strategy, entries, user_storage, *args, **kwargs):
    """Remove every association in *entries* from the storage backend."""
    for association in entries:
        user_storage.disconnect(association)
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_info_s prof_info_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
typedef struct prof_recent_s prof_recent_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#ifndef JEMALLOC_PROF
/* Minimize memory bloat for non-prof builds. */
# define PROF_DUMP_BUFSIZE 1
#elif defined(JEMALLOC_DEBUG)
/* Use a small buffer size in debug build, mainly to facilitate testing. */
# define PROF_DUMP_BUFSIZE 16
#else
# define PROF_DUMP_BUFSIZE 65536
#endif
/* Size of size class related tables */
#ifdef JEMALLOC_PROF
# define PROF_SC_NSIZES SC_NSIZES
#else
/* Minimize memory bloat for non-prof builds. */
# define PROF_SC_NSIZES 1
#endif
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
#else
#define PROF_DUMP_FILENAME_LEN 1
#endif
/* Default number of recent allocations to record. */
#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ | c | github | https://github.com/redis/redis | deps/jemalloc/include/jemalloc/internal/prof_types.h |
"""
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'127.0.0.1',
'growth.studio.ren'
]
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY= True
X_FRAME_OPTIONS = 'DENY'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
### settings.py file
### settings that are not environment dependent
try:
from .local_settings import *
except ImportError:
pass | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# encoding: utf-8
"""Parses a UltiSnips snippet definition and launches it into Vim."""
from UltiSnips.snippet.parsing.base import (
tokenize_snippet_text,
finalize,
resolve_ambiguity,
)
from UltiSnips.snippet.parsing.lexer import (
EscapeCharToken,
VisualToken,
TransformationToken,
ChoicesToken,
TabStopToken,
MirrorToken,
PythonCodeToken,
VimLCodeToken,
ShellCodeToken,
)
from UltiSnips.text_objects import (
EscapedChar,
Mirror,
PythonCode,
ShellCode,
TabStop,
Transformation,
VimLCode,
Visual,
Choices,
)
# Maps a lexer token type to the text-object class instantiated for it.
# TransformationToken, TabStopToken and MirrorToken are handled separately
# because they need knowledge of the tabstops seen so far.
_TOKEN_TO_TEXTOBJECT = {
    EscapeCharToken: EscapedChar,
    VisualToken: Visual,
    ShellCodeToken: ShellCode,
    PythonCodeToken: PythonCode,
    VimLCodeToken: VimLCode,
    ChoicesToken: Choices,
}

# Token types recognized in UltiSnips-format snippet definitions.
__ALLOWED_TOKENS = [
    EscapeCharToken,
    VisualToken,
    TransformationToken,
    ChoicesToken,
    TabStopToken,
    MirrorToken,
    PythonCodeToken,
    VimLCodeToken,
    ShellCodeToken,
]
def _create_transformations(all_tokens, seen_ts):
"""Create the objects that need to know about tabstops."""
for parent, token in all_tokens:
if isinstance(token, TransformationToken):
if token.number not in seen_ts:
raise RuntimeError(
"Tabstop %i is not known but is used by a Transformation"
% token.number
)
Transformation(parent, seen_ts[token.number], token)
def parse_and_instantiate(parent_to, text, indent):
    """Parse 'text' (UltiSnips snippet syntax) at the given 'indent'.

    Instantiates every text object as a child of 'parent_to' and puts the
    snippet's initial text into Vim.
    """
    tokens, tabstops = tokenize_snippet_text(
        parent_to,
        text,
        indent,
        __ALLOWED_TOKENS,
        __ALLOWED_TOKENS,
        _TOKEN_TO_TEXTOBJECT,
    )
    resolve_ambiguity(tokens, tabstops)
    _create_transformations(tokens, tabstops)
    finalize(tokens, tabstops, parent_to)
import {
type CodegenResult,
type CompilerOptions,
type DirectiveTransform,
type NodeTransform,
type ParserOptions,
type RootNode,
baseCompile,
baseParse,
noopDirectiveTransform,
} from '@vue/compiler-core'
import { parserOptions } from './parserOptions'
import { transformStyle } from './transforms/transformStyle'
import { transformVHtml } from './transforms/vHtml'
import { transformVText } from './transforms/vText'
import { transformModel } from './transforms/vModel'
import { transformOn } from './transforms/vOn'
import { transformShow } from './transforms/vShow'
import { transformTransition } from './transforms/Transition'
import { stringifyStatic } from './transforms/stringifyStatic'
import { ignoreSideEffectTags } from './transforms/ignoreSideEffectTags'
import { validateHtmlNesting } from './transforms/validateHtmlNesting'
import { extend } from '@vue/shared'
export { parserOptions }
// DOM-specific node transforms, layered on top of compiler-core's own set.
export const DOMNodeTransforms: NodeTransform[] = [
  transformStyle,
  // dev-only checks: Transition usage validation and HTML nesting validation
  ...(__DEV__ ? [transformTransition, validateHtmlNesting] : []),
]

// DOM-specific implementations of built-in directives; `model` and `on`
// override the platform-agnostic versions from compiler-core.
export const DOMDirectiveTransforms: Record<string, DirectiveTransform> = {
  cloak: noopDirectiveTransform,
  html: transformVHtml,
  text: transformVText,
  model: transformModel, // override compiler-core
  on: transformOn, // override compiler-core
  show: transformShow,
}
/**
 * Compile a template string (or a pre-parsed AST) into render-function code,
 * merging the DOM parser options, node transforms and directive transforms
 * into compiler-core's baseCompile. User-supplied transforms run after the
 * built-in DOM ones.
 */
export function compile(
  src: string | RootNode,
  options: CompilerOptions = {},
): CodegenResult {
  return baseCompile(
    src,
    extend({}, parserOptions, options, {
      nodeTransforms: [
        // ignore <script> and <tag>
        // this is not put inside DOMNodeTransforms because that list is used
        // by compiler-ssr to generate vnode fallback branches
        ignoreSideEffectTags,
        ...DOMNodeTransforms,
        ...(options.nodeTransforms || []),
      ],
      directiveTransforms: extend(
        {},
        DOMDirectiveTransforms,
        options.directiveTransforms || {},
      ),
      // static stringification is a build-time (non-browser) optimization only
      transformHoist: __BROWSER__ ? null : stringifyStatic,
    }),
  )
}
/**
 * Parse a template string into an AST using the DOM parser options merged
 * with any caller-supplied overrides.
 */
export function parse(template: string, options: ParserOptions = {}): RootNode {
  const resolvedOptions = extend({}, parserOptions, options)
  return baseParse(template, resolvedOptions)
}
export * from './runtimeHelpers'
export { transformStyle } from './transforms/transformStyle'
export {
createDOMCompilerError,
DOMErrorCodes,
DOMErrorMessages,
} from './errors'
export * from '@vue/compiler-core' | typescript | github | https://github.com/vuejs/core | packages/compiler-dom/src/index.ts |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from ..anagonda.context import guru, godef
from ..anagonda.context.error import AnaGondaError
from commands.base import Command
class Goto(Command):
    """Run guru or godef to resolve a definition location.

    Results are delivered asynchronously through ``callback`` tagged with
    ``uid`` (command id) and ``vid`` (view id).
    """

    def __init__(self, callback, uid, vid, code, filename, settings, go_env):
        self.vid = vid
        self.code = code
        self.path = filename
        self.settings = settings
        self.go_env = go_env
        super(Goto, self).__init__(callback, uid)

    def run(self):
        """Run the command, reporting success or failure via the callback."""
        try:
            self.callback({
                'success': True,
                'result': self.goto(),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc())
            self.callback({
                'success': False,
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })

    def goto(self):
        """Run GoDef, GuRu or both and return back a ready to use result.

        The 'guru_usage' setting selects the strategy: 'always' uses guru
        unconditionally, 'fallback' tries godef first and falls back to guru
        when godef fails or finds nothing.
        """
        guru_usage = self.settings.get('guru_usage', 'always')
        if guru_usage == 'always':
            return self._normalize(self.guru())

        defs = []
        try:
            defs = self._normalize(self.godef())
        except AnaGondaError as err:
            logging.error('GoDef failed with error: {0}'.format(err))
            if guru_usage == 'fallback':
                defs = self._normalize(self.guru())
        else:
            if len(defs) == 0 and guru_usage == 'fallback':
                defs = self._normalize(self.guru())
        return defs

    def guru(self):
        """Use Guru context and return back the result."""
        with guru.Guru(
                None, 'definition', self.path,
                self.settings.get('offset', 0),
                self.settings.get('modified_buffer'), self.go_env) as defs:
            return defs

    def godef(self):
        """Use GoDef context and return back the result."""
        with godef.GoDef(
                self.code, self.path,
                self.settings.get('expr', ''), False, self.go_env) as defs:
            return defs

    def _normalize(self, defs):
        """Normalize tools output into anaconda's goto format."""
        if defs['tool'] == 'guru':
            try:
                return [{
                    'title': defs['objpos'].split(':')[0],
                    'position': defs['objpos']
                }]
            except Exception:
                # Was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit; keep the best-effort fallback
                # but only for ordinary errors from a malformed payload.
                return []
        return [{
            'title': defs['filename'],
            'position': '{}:{}:{}'.format(
                defs['filename'], defs['line'], defs['column']
            )
        }]
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import random, sys, time
from bisect import insort, bisect_left
from functools import wraps
from whoosh.compat import xrange
# These must be valid separate characters in CASE-INSENSITIVE filenames
IDCHARS = "0123456789abcdefghijklmnopqrstuvwxyz"

# Pick the best timer available for this interpreter/platform.
# NOTE(review): time.clock was removed in Python 3.8, but that branch is only
# reached on interpreters lacking perf_counter (pre-3.3) — confirm supported
# interpreter range.
if hasattr(time, "perf_counter"):
    now = time.perf_counter
elif sys.platform == 'win32':
    now = time.clock
else:
    now = time.time
def random_name(size=28):
    """Return a random identifier of *size* characters drawn from IDCHARS."""
    chars = (random.choice(IDCHARS) for _ in xrange(size))
    return "".join(chars)
def random_bytes(size=28):
    """Return *size* random bytes.

    On Python 3 this is a ``bytes`` object; on Python 2 a ``str`` built via
    ``array``. The original referenced ``array`` without importing it, which
    raised NameError on the Python 2 path.
    """
    gen = (random.randint(0, 255) for _ in xrange(size))
    if sys.version_info[0] >= 3:
        return bytes(gen)
    else:
        # Local import: this module never imported array at the top level.
        from array import array
        return array("B", gen).tostring()
def make_binary_tree(fn, args, **kwargs):
    """Takes a function/class that takes two positional arguments and a list of
    arguments and returns a binary tree of results/instances.

    >>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
    UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))

    Any keyword arguments given to this function are passed to the class
    initializer.
    """
    if not args:
        raise ValueError("Called make_binary_tree with empty list")
    if len(args) == 1:
        return args[0]

    mid = len(args) // 2
    left = make_binary_tree(fn, args[:mid], **kwargs)
    right = make_binary_tree(fn, args[mid:], **kwargs)
    return fn(left, right, **kwargs)
def make_weighted_tree(fn, ls, **kwargs):
    """Takes a function/class that takes two positional arguments and a list of
    (weight, argument) tuples and returns a huffman-like weighted tree of
    results/instances.
    """
    if not ls:
        raise ValueError("Called make_weighted_tree with empty list")

    # Repeatedly merge the two lightest items, re-inserting the combined
    # node so the list stays sorted by weight.
    ls.sort()
    while len(ls) > 1:
        lightest = ls.pop(0)
        heavier = ls.pop(0)
        merged = (lightest[0] + heavier[0], fn(lightest[1], heavier[1]))
        insort(ls, merged)
    return ls[0][1]
# Fibonacci function

_fib_cache = {}


def fib(n):
    """Returns the nth value in the Fibonacci sequence.
    """
    # Base cases: by this module's definition fib(1) == 1, fib(2) == 2.
    if n <= 2:
        return n
    cached = _fib_cache.get(n)
    if cached is not None:
        return cached
    value = fib(n - 1) + fib(n - 2)
    _fib_cache[n] = value
    return value
# Decorators

def synchronized(func):
    """Decorator for storage-access methods, which synchronizes on a threading
    lock. The parent object must have 'is_closed' and '_sync_lock' attributes.
    """
    @wraps(func)
    def locked_call(self, *args, **kwargs):
        # Hold the instance lock for the duration of the wrapped call.
        with self._sync_lock:
            return func(self, *args, **kwargs)
    return locked_call
def unclosed(method):
    """
    Decorator to check if the object is closed.
    """
    @wraps(method)
    def guarded(self, *args, **kwargs):
        if self.closed:
            raise ValueError("Operation on a closed object")
        return method(self, *args, **kwargs)
    return guarded
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AimsUI/AimsClient/Gui/Ui_AimsQueueWidget.ui'
#
# Created: Tue Jun 14 09:56:35 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_AimsQueueWidget(object):
    """pyuic4-generated UI builder for the AIMS queue widget.

    NOTE: generated from Ui_AimsQueueWidget.ui — edits here are lost on
    regeneration (see the warning in the file header).
    """

    def setupUi(self, AimsQueueWidget):
        """Build the widget tree: a tab widget with 'Edit Feature' and
        'Review' tabs above a stretchable message label."""
        AimsQueueWidget.setObjectName(_fromUtf8("AimsQueueWidget"))
        AimsQueueWidget.resize(661, 428)
        self.verticalLayout = QtGui.QVBoxLayout(AimsQueueWidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.tabWidget = QtGui.QTabWidget(AimsQueueWidget)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        self.uEditFeatureTab = EditFeatureWidget()
        self.uEditFeatureTab.setObjectName(_fromUtf8("uEditFeatureTab"))
        self.tabWidget.addTab(self.uEditFeatureTab, _fromUtf8(""))
        self.uResolutionTab = ReviewQueueWidget()
        self.uResolutionTab.setObjectName(_fromUtf8("uResolutionTab"))
        self.tabWidget.addTab(self.uResolutionTab, _fromUtf8(""))
        self.verticalLayout.addWidget(self.tabWidget)
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.uMessageLabel = QtGui.QLabel(AimsQueueWidget)
        self.uMessageLabel.setText(_fromUtf8(""))
        self.uMessageLabel.setObjectName(_fromUtf8("uMessageLabel"))
        self.horizontalLayout_5.addWidget(self.uMessageLabel)
        self.horizontalLayout_5.setStretch(0, 1)
        self.verticalLayout.addLayout(self.horizontalLayout_5)

        self.retranslateUi(AimsQueueWidget)
        # Open on the 'Review' tab by default.
        self.tabWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(AimsQueueWidget)

    def retranslateUi(self, AimsQueueWidget):
        """Install the translatable user-visible strings."""
        AimsQueueWidget.setWindowTitle(_translate("AimsQueueWidget", "Form", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.uEditFeatureTab), _translate("AimsQueueWidget", "Edit Feature", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.uResolutionTab), _translate("AimsQueueWidget", "Review", None))
from ReviewQueueWidget import ReviewQueueWidget
from EditFeatureWidget import EditFeatureWidget | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "transformers @ git+https://github.com/huggingface/transformers.git",
# "albumentations >= 1.4.16",
# "accelerate >= 0.12.0",
# "torch >= 1.3",
# "datasets >= 2.14.0",
# "sentencepiece != 0.1.92",
# "protobuf",
# "evaluate",
# "scikit-learn",
# ]
# ///
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...)
on a text file or a dataset without using HuggingFace Trainer.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=fill-mask
"""
# You can also adapt this script on your own mlm task. Pointers for this are left as comments.
import argparse
import json
import logging
import math
import os
import random
from itertools import chain
from pathlib import Path
import datasets
import torch
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import HfApi
from torch import nn
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
SchedulerType,
get_scheduler,
)
from transformers.trainer_pt_utils import get_parameter_names
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.57.0.dev0")

logger = get_logger(__name__)

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

# Model types accepted by --model_type when training from scratch.
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
    """Parse and sanity-check the command-line arguments for MLM fine-tuning.

    Returns the parsed ``argparse.Namespace``. Raises ValueError when neither
    a hub dataset name nor local csv/json/txt files are supplied, when a file
    has an unsupported extension, or when --push_to_hub lacks --output_dir.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a Masked Language Modeling task")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The configuration name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    # NOTE(review): no type= given, so a command-line value arrives as str
    # while the default is int — confirm downstream handles both.
    parser.add_argument(
        "--validation_split_percentage",
        default=5,
        help="The percentage of the train set used as validation set in case there's no validation split",
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--model_type",
        type=str,
        default=None,
        help="Model type to use if training from scratch.",
        choices=MODEL_TYPES,
    )
    parser.add_argument(
        "--max_seq_length",
        type=int,
        default=None,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated."
        ),
    )
    # NOTE(review): type=bool is an argparse pitfall — any non-empty string
    # (including "False") parses as True. Confirm callers only rely on the
    # default or pass nothing.
    parser.add_argument(
        "--line_by_line",
        type=bool,
        default=False,
        help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help=(
            "Whether to trust the execution of code from datasets/models defined on the Hub."
            " This option should only be set to `True` for repositories you trust and in which you have read the"
            " code, as it will execute code present on the Hub on your local machine."
        ),
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to enable experiment trackers for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="all",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
            "Only applicable when `--with_tracking` is passed."
        ),
    )
    args = parser.parse_args()

    # Sanity checks
    if args.dataset_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError("Need either a dataset name or a training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            if extension not in ["csv", "json", "txt"]:
                raise ValueError("`train_file` should be a csv, json or txt file.")
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            if extension not in ["csv", "json", "txt"]:
                raise ValueError("`validation_file` should be a csv, json or txt file.")

    if args.push_to_hub:
        if args.output_dir is None:
            raise ValueError("Need an `output_dir` to create a repo when `--push_to_hub` is passed.")

    return args
def main():
    """Entry point: train a masked language model without using `Trainer`.

    Orchestrates argument parsing, Accelerate setup, dataset loading and
    tokenization, the training loop (with step/epoch checkpointing and
    resume support), per-epoch perplexity evaluation, and optional pushes
    of the model/tokenizer to the Hugging Face Hub.
    """
    args = parse_args()
    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
    # in the environment
    accelerator_log_kwargs = {}
    if args.with_tracking:
        accelerator_log_kwargs["log_with"] = args.report_to
        accelerator_log_kwargs["project_dir"] = args.output_dir
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    # Handle the repository creation
    if accelerator.is_main_process:
        # BUGFIX: create the output directory up front; previously it was only
        # created when `--push_to_hub` was NOT passed, so the `.gitignore`
        # open below could fail on a fresh run with `--push_to_hub`.
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        if args.push_to_hub:
            # Retrieve or infer repo_name
            repo_name = args.hub_model_id
            if repo_name is None:
                repo_name = Path(args.output_dir).absolute().name
            # Create repo and retrieve repo_id
            api = HfApi()
            repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id
            # BUGFIX: the original opened `.gitignore` with "w+" (truncating it)
            # and then tested membership on the freshly truncated file object,
            # which both discarded user entries and made the checks no-ops.
            # Read any existing content first, then only append missing entries.
            gitignore_path = os.path.join(args.output_dir, ".gitignore")
            existing = ""
            if os.path.exists(gitignore_path):
                with open(gitignore_path) as f:
                    existing = f.read()
            with open(gitignore_path, "a") as gitignore:
                if "step_*" not in existing:
                    gitignore.write("step_*\n")
                if "epoch_*" not in existing:
                    gitignore.write("epoch_*\n")
    accelerator.wait_for_everyone()
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            args.dataset_name, args.dataset_config_name, trust_remote_code=args.trust_remote_code
        )
        if "validation" not in raw_datasets:
            raw_datasets["validation"] = load_dataset(
                args.dataset_name,
                args.dataset_config_name,
                split=f"train[:{args.validation_split_percentage}%]",
                trust_remote_code=args.trust_remote_code,
            )
            raw_datasets["train"] = load_dataset(
                args.dataset_name,
                args.dataset_config_name,
                split=f"train[{args.validation_split_percentage}%:]",
                trust_remote_code=args.trust_remote_code,
            )
    else:
        data_files = {}
        if args.train_file is not None:
            data_files["train"] = args.train_file
            extension = args.train_file.split(".")[-1]
        if args.validation_file is not None:
            data_files["validation"] = args.validation_file
            extension = args.validation_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        raw_datasets = load_dataset(extension, data_files=data_files)
        # If no validation data is there, validation_split_percentage will be used to divide the dataset.
        if "validation" not in raw_datasets:
            raw_datasets["validation"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[:{args.validation_split_percentage}%]",
            )
            raw_datasets["train"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[{args.validation_split_percentage}%:]",
            )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
        )
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            trust_remote_code=args.trust_remote_code,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config, trust_remote_code=args.trust_remote_code)
    # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
    # on a small vocab and want a smaller embedding size, remove this test.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    column_names = raw_datasets["train"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    if args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
    if args.line_by_line:
        # When using line_by_line, we just tokenize each nonempty line.
        padding = "max_length" if args.pad_to_max_length else False

        def tokenize_function(examples):
            # Remove empty lines
            examples[text_column_name] = [
                line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
            ]
            return tokenizer(
                examples[text_column_name],
                padding=padding,
                truncation=True,
                max_length=max_seq_length,
                # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
                # receives the `special_tokens_mask`.
                return_special_tokens_mask=True,
            )

        with accelerator.main_process_first():
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                num_proc=args.preprocessing_num_workers,
                remove_columns=[text_column_name],
                load_from_cache_file=not args.overwrite_cache,
                desc="Running tokenizer on dataset line_by_line",
            )
    else:
        # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
        # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
        # efficient when it receives the `special_tokens_mask`.
        def tokenize_function(examples):
            return tokenizer(examples[text_column_name], return_special_tokens_mask=True)

        with accelerator.main_process_first():
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                num_proc=args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not args.overwrite_cache,
                desc="Running tokenizer on every text in dataset",
            )

        # Main data processing function that will concatenate all texts from our dataset and generate chunks of
        # max_seq_length.
        def group_texts(examples):
            # Concatenate all texts.
            concatenated_examples = {k: list(chain(*examples[k])) for k in examples}
            total_length = len(concatenated_examples[list(examples.keys())[0]])
            # We drop the small remainder, and if the total_length < max_seq_length we exclude this batch and return an empty dict.
            # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.
            total_length = (total_length // max_seq_length) * max_seq_length
            # Split by chunks of max_len.
            result = {
                k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
                for k, t in concatenated_examples.items()
            }
            return result

        # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
        # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
        # might be slower to preprocess.
        #
        # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
        # https://huggingface.co/docs/datasets/process#map
        with accelerator.main_process_first():
            tokenized_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                num_proc=args.preprocessing_num_workers,
                load_from_cache_file=not args.overwrite_cache,
                desc=f"Grouping texts in chunks of {max_seq_length}",
            )
    train_dataset = tokenized_datasets["train"]
    eval_dataset = tokenized_datasets["validation"]
    # Conditional for small test subsets
    if len(train_dataset) > 3:
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=args.mlm_probability)
    # DataLoaders creation:
    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
    )
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    forbidden_name_patterns = [r"bias", r"layernorm", r"rmsnorm", r"(?:^|\.)norm(?:$|\.)", r"_norm(?:$|\.)"]
    decay_parameters = get_parameter_names(model, [nn.LayerNorm], forbidden_layer_names=forbidden_name_patterns)
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if n in decay_parameters and p.requires_grad],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if n not in decay_parameters and p.requires_grad],
            "weight_decay": 0.0,
        },
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Note -> the training dataloader needs to be prepared before we grab its length below (cause its length will be
    # shorter in multiprocess)
    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps
        if overrode_max_train_steps
        else args.max_train_steps * accelerator.num_processes,
    )
    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
    if accelerator.distributed_type == DistributedType.TPU:
        model.tie_weights()
    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    # Figure out how many steps we should save the Accelerator states
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)
    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log Enums, need the raw value
        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
        accelerator.init_trackers("mlm_no_trainer", experiment_config)
    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        # BUGFIX: this condition used `or`, which is always true and made the
        # auto-discovery branch below unreachable; `and` restores the intent
        # of falling back to the most recent checkpoint directory.
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
            checkpoint_path = path
            path = os.path.basename(checkpoint_path)
        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(checkpoint_path)
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
            completed_steps = starting_epoch * num_update_steps_per_epoch
        else:
            # need to multiply `gradient_accumulation_steps` to reflect real steps
            resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
            starting_epoch = resume_step // len(train_dataloader)
            completed_steps = resume_step // args.gradient_accumulation_steps
            resume_step -= starting_epoch * len(train_dataloader)
    # update the progress_bar if load from checkpoint
    progress_bar.update(completed_steps)
    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We skip the first `n` batches in the dataloader when resuming from a checkpoint
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        else:
            active_dataloader = train_dataloader
        for step, batch in enumerate(active_dataloader):
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                # We keep track of the loss at each epoch
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1
            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
                    output_dir = f"step_{completed_steps}"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
            if completed_steps >= args.max_train_steps:
                break
        model.eval()
        losses = []
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)
            loss = outputs.loss
            losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
        losses = torch.cat(losses)
        try:
            eval_loss = torch.mean(losses)
            perplexity = math.exp(eval_loss)
        except OverflowError:
            perplexity = float("inf")
        logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
        if args.with_tracking:
            accelerator.log(
                {
                    "perplexity": perplexity,
                    "eval_loss": eval_loss,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                    "step": completed_steps,
                },
                step=completed_steps,
            )
        if args.push_to_hub and epoch < args.num_train_epochs - 1:
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(
                args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
            )
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                api.upload_folder(
                    commit_message=f"Training in progress epoch {epoch}",
                    folder_path=args.output_dir,
                    repo_id=repo_id,
                    repo_type="model",
                    token=args.hub_token,
                )
        if args.checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(
            args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
        )
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                api.upload_folder(
                    commit_message="End of training",
                    folder_path=args.output_dir,
                    repo_id=repo_id,
                    repo_type="model",
                    token=args.hub_token,
                )
            with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
                json.dump({"perplexity": perplexity}, f)
    accelerator.wait_for_everyone()
    accelerator.end_training()
if __name__ == "__main__":
main() | python | github | https://github.com/huggingface/transformers | examples/pytorch/language-modeling/run_mlm_no_trainer.py |
import { assertCode, compileSFCScript as compile } from '../utils'
// in dev mode, declared bindings are returned as an object from setup()
// when using TS, users may import types which should not be returned as
// values, so we need to check import usage in the template to determine
// what to be returned.
test('components', () => {
const { content } = compile(`
<script setup lang="ts">
import { FooBar, FooBaz, FooQux, foo } from './x'
const fooBar: FooBar = 1
</script>
<template>
<FooBaz></FooBaz>
<foo-qux/>
<foo/>
FooBar
</template>
`)
// FooBar: should not be matched by plain text or incorrect case
// FooBaz: used as PascalCase component
// FooQux: used as kebab-case component
// foo: lowercase component
expect(content).toMatch(
`return { fooBar, get FooBaz() { return FooBaz }, ` +
`get FooQux() { return FooQux }, get foo() { return foo } }`,
)
assertCode(content)
})
test('directive', () => {
  const { content } = compile(`
  <script setup lang="ts">
  import { vMyDir } from './x'
  </script>
  <template>
  <div v-my-dir></div>
  </template>
  `)
  // vMyDir: a camelCase `v`-prefixed import must be recognized as used when
  // referenced via its kebab-case directive form in the template.
  expect(content).toMatch(`return { get vMyDir() { return vMyDir } }`)
  assertCode(content)
})
test('dynamic arguments', () => {
const { content } = compile(`
<script setup lang="ts">
import { FooBar, foo, bar, unused, baz, msg } from './x'
</script>
<template>
<FooBar #[foo.slotName] />
<FooBar #unused />
<div :[bar.attrName]="15"></div>
<div unused="unused"></div>
<div #[\`item:\${baz.key}\`]="{ value }"></div>
<FooBar :msg />
</template>
`)
expect(content).toMatch(
`return { get FooBar() { return FooBar }, get foo() { return foo }, ` +
`get bar() { return bar }, get baz() { return baz }, get msg() { return msg } }`,
)
assertCode(content)
})
// https://github.com/vuejs/core/issues/4599
test('attribute expressions', () => {
const { content } = compile(`
<script setup lang="ts">
import { bar, baz } from './x'
const cond = true
</script>
<template>
<div :class="[cond ? '' : bar(), 'default']" :style="baz"></div>
</template>
`)
expect(content).toMatch(
`return { cond, get bar() { return bar }, get baz() { return baz } }`,
)
assertCode(content)
})
test('vue interpolations', () => {
const { content } = compile(`
<script setup lang="ts">
import { x, y, z, x$y } from './x'
</script>
<template>
<div :id="z + 'y'">{{ x }} {{ yy }} {{ x$y }}</div>
</template>
`)
// x: used in interpolation
// y: should not be matched by {{ yy }} or 'y' in binding exps
// x$y: #4274 should escape special chars when creating Regex
expect(content).toMatch(
`return { get x() { return x }, get z() { return z }, get x$y() { return x$y } }`,
)
assertCode(content)
})
// #4340 interpolations in template strings
test('js template string interpolations', () => {
const { content } = compile(`
<script setup lang="ts">
import { VAR, VAR2, VAR3 } from './x'
</script>
<template>
{{ \`\${VAR}VAR2\${VAR3}\` }}
</template>
`)
// VAR2 should not be matched
expect(content).toMatch(
`return { get VAR() { return VAR }, get VAR3() { return VAR3 } }`,
)
assertCode(content)
})
// edge case: last tag in template
test('last tag', () => {
const { content } = compile(`
<script setup lang="ts">
import { FooBaz, Last } from './x'
</script>
<template>
<FooBaz></FooBaz>
<Last/>
</template>
`)
expect(content).toMatch(
`return { get FooBaz() { return FooBaz }, get Last() { return Last } }`,
)
assertCode(content)
})
test('TS annotations', () => {
const { content } = compile(`
<script setup lang="ts">
import { Foo, Bar, Baz, Qux, Fred } from './x'
const a = 1
function b() {}
</script>
<template>
{{ a as Foo }}
{{ b<Bar>() }}
{{ Baz }}
<Comp v-slot="{ data }: Qux">{{ data }}</Comp>
<div v-for="{ z = x as Qux } in list as Fred"/>
</template>
`)
expect(content).toMatch(`return { a, b, get Baz() { return Baz } }`)
assertCode(content)
})
// vuejs/vue#12591
test('v-on inline statement', () => {
// should not error
compile(`
<script setup lang="ts">
import { foo } from './foo'
</script>
<template>
<div @click="$emit('update:a');"></div>
</template>
`)
})
test('template ref', () => {
const { content } = compile(`
<script setup lang="ts">
import { foo, bar, Baz } from './foo'
</script>
<template>
<div ref="foo"></div>
<div ref=""></div>
<Baz ref="bar" />
</template>
`)
expect(content).toMatch(
'return { get foo() { return foo }, get bar() { return bar }, get Baz() { return Baz } }',
)
assertCode(content)
})
// https://github.com/nuxt/nuxt/issues/22416
// https://github.com/nuxt/nuxt/issues/22416
test('property access', () => {
  const { content } = compile(`
  <script setup lang="ts">
  import { Foo, Bar, Baz } from './foo'
  </script>
  <template>
  <div>{{ Foo.Bar.Baz }}</div>
  </template>
  `)
  // Only `Foo` is used as a root identifier; `Bar`/`Baz` appear solely as
  // property accesses on it and must NOT be counted as template usages.
  expect(content).toMatch('return { get Foo() { return Foo } }')
  assertCode(content)
})
test('spread operator', () => {
const { content } = compile(`
<script setup lang="ts">
import { Foo, Bar, Baz } from './foo'
</script>
<template>
<div v-bind="{ ...Foo.Bar.Baz }"></div>
</template>
`)
expect(content).toMatch('return { get Foo() { return Foo } }')
assertCode(content)
})
test('property access (whitespace)', () => {
const { content } = compile(`
<script setup lang="ts">
import { Foo, Bar, Baz } from './foo'
</script>
<template>
<div>{{ Foo . Bar . Baz }}</div>
</template>
`)
expect(content).toMatch('return { get Foo() { return Foo } }')
assertCode(content)
})
// #9974
test('namespace / dot component usage', () => {
const { content } = compile(`
<script setup lang="ts">
import * as Foo from './foo'
</script>
<template>
<Foo.Bar />
</template>
`)
expect(content).toMatch('return { get Foo() { return Foo } }')
assertCode(content)
})
test('check when has explicit parse options', () => {
const { content } = compile(
`
<script setup lang="ts">
import { x } from './x'
</script>
<template>
{{ x }}
</template>
`,
undefined,
{ templateParseOptions: {} },
)
expect(content).toMatch('return { get x() { return x } }')
})
// #11745
test('shorthand binding w/ kebab-case', () => {
const { content } = compile(
`
<script setup lang="ts">
import { fooBar } from "./foo.ts"
</script>
<template>
<div :foo-bar></div>
</template>
`,
)
expect(content).toMatch('return { get fooBar() { return fooBar }')
}) | typescript | github | https://github.com/vuejs/core | packages/compiler-sfc/__tests__/compileScript/importUsageCheck.spec.ts |
<?php
namespace Illuminate\Tests\Routing;
use Illuminate\Routing\RouteUri;
use PHPUnit\Framework\Attributes\DataProvider;
use PHPUnit\Framework\TestCase;
class RouteUriTest extends TestCase
{
    /**
     * Parsing must strip the inline binding-field syntax ("{param:field}")
     * from the URI while collecting the fields into a parameter => field map.
     *
     * @param  string  $uri
     * @param  string  $expectedParsedUri
     * @param  array  $expectedBindingFields
     * @return void
     */
    #[DataProvider('uriProvider')]
    public function testRouteUrisAreProperlyParsed($uri, $expectedParsedUri, $expectedBindingFields)
    {
        $parsed = RouteUri::parse($uri);

        $this->assertSame($expectedParsedUri, $parsed->uri);
        $this->assertEquals($expectedBindingFields, $parsed->bindingFields);
    }
    /**
     * Data provider: each case is [raw URI, expected parsed URI, expected binding fields].
     *
     * Covers plain URIs, required and optional parameters, and the
     * "{param:field}" custom-key syntax (with and without the optional "?"),
     * including multiple binding fields in one URI.
     *
     * @return array
     */
    public static function uriProvider()
    {
        return [
            [
                '/foo',
                '/foo',
                [],
            ],
            [
                '/foo/{bar}',
                '/foo/{bar}',
                [],
            ],
            [
                '/foo/{bar}/baz/{qux}',
                '/foo/{bar}/baz/{qux}',
                [],
            ],
            [
                '/foo/{bar}/baz/{qux?}',
                '/foo/{bar}/baz/{qux?}',
                [],
            ],
            [
                '/foo/{bar:slug}',
                '/foo/{bar}',
                ['bar' => 'slug'],
            ],
            [
                '/foo/{bar}/baz/{qux:slug}',
                '/foo/{bar}/baz/{qux}',
                ['qux' => 'slug'],
            ],
            [
                '/foo/{bar}/baz/{qux:slug}',
                '/foo/{bar}/baz/{qux}',
                ['qux' => 'slug'],
            ],
            [
                '/foo/{bar}/baz/{qux:slug?}',
                '/foo/{bar}/baz/{qux?}',
                ['qux' => 'slug'],
            ],
            [
                '/foo/{bar}/baz/{qux:slug?}/{test:id?}',
                '/foo/{bar}/baz/{qux?}/{test?}',
                ['qux' => 'slug', 'test' => 'id'],
            ],
        ];
    }
} | php | github | https://github.com/laravel/framework | tests/Routing/RouteUriTest.php |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import timeit
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
  """Multinomial sampling built from primitive ops via the Gumbel-max trick.

  Draws uniform noise, transforms it into Gumbel noise with -log(-log(u)),
  adds it to the (broadcast) logits and takes the argmax over the class
  dimension.  Serves as a reference implementation to cross-check the native
  `random_ops.multinomial` kernel in the tests below.

  Args:
    logits: 2-D Tensor of shape [batch size, num classes].
    num_samples: Python int; number of samples to draw per batch row.

  Returns:
    A Tensor of shape [batch size, num samples] holding sampled class ids.
  """
  # [batch size, num classes, num samples]
  unif = random_ops.random_uniform(logits.get_shape().concatenate(
      tensor_shape.TensorShape([num_samples])))
  noise = -math_ops.log(-math_ops.log(unif))
  # [batch size, num classes, 1]
  logits = array_ops.expand_dims(logits, -1)
  # [batch size, num samples]
  # NOTE: `dimension=` is the legacy argmax keyword used by this TF version.
  return math_ops.argmax(logits + noise, dimension=1)
native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
  """Correctness and statistical tests for `random_ops.multinomial`."""

  # Run on CPU by default; MultinomialGpuTest flips this to re-run on GPU.
  use_gpu = False

  def testSmallEntropy(self):
    # Near-deterministic logits: samples must always pick the dominant class.
    random_seed.set_random_seed(1618)
    with self.test_session(use_gpu=self.use_gpu):
      # A logit value of -10 corresponds to a probability of ~5e-5.
      logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
      num_samples = 1000
      samples = random_ops.multinomial(logits, num_samples).eval()
      self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)

  def testOneOpMultipleStepsIndependent(self):
    with self.test_session(use_gpu=self.use_gpu) as sess:
      sample_op1, _ = self._make_ops(10)
      # Consecutive runs shouldn't yield identical output.
      sample1a = sess.run(sample_op1)
      sample1b = sess.run(sample_op1)
      self.assertFalse(np.equal(sample1a, sample1b).all())

  def testTwoOpsIndependent(self):
    with self.test_session(use_gpu=self.use_gpu) as sess:
      sample_op1, sample_op2 = self._make_ops(32)
      sample1, sample2 = sess.run([sample_op1, sample_op2])
      # We expect sample1 and sample2 to be independent.
      # 1 in 2^32 chance of this assertion failing.
      self.assertFalse(np.equal(sample1, sample2).all())

  def testTwoOpsSameSeedDrawSameSequences(self):
    # Explicit op-level seeding must make two distinct ops deterministic.
    with self.test_session(use_gpu=self.use_gpu) as sess:
      sample_op1, sample_op2 = self._make_ops(1000, seed=1)
      sample1, sample2 = sess.run([sample_op1, sample_op2])
      self.assertAllEqual(sample1, sample2)

  def testLargeLogits(self):
    # Extreme logits (+/-1000) must not overflow into out-of-range classes.
    for neg in [True, False]:
      with self.test_session(use_gpu=self.use_gpu):
        logits = np.array([[1000.] * 5])
        if neg:
          logits *= -1
        samples = random_ops.multinomial(logits, 10).eval()
        # Sampled classes should be in-range.
        self.assertTrue((samples >= 0).all())
        self.assertTrue((samples < 5).all())

  def testSamplingCorrectness(self):
    # Compare empirical frequencies of the native op and the composed
    # (Gumbel-max) sampler against the true distribution via chi-squared.
    np.random.seed(1618)  # Make it reproducible.
    num_samples = 21000
    rand_probs = self._normalize(np.random.random_sample((10,)))
    rand_probs2 = self._normalize(np.random.random_sample((3, 5)))  # batched
    for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
      probs = np.asarray(probs)
      if len(probs.shape) == 1:
        probs = probs.reshape(1, probs.size)  # singleton batch
      logits = np.log(probs).astype(np.float32)
      composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
      native_freqs = self._do_sampling(logits, num_samples, native_sampler)
      # the test here is similar to core/lib/random/distribution_sampler_test.cc
      composed_chi2 = self._chi2(probs, composed_freqs)
      native_chi2 = self._chi2(probs, native_freqs)
      composed_native_chi2 = self._chi2(composed_freqs, native_freqs)

      def check(chi2s):
        for chi2 in chi2s:
          self.assertLess(chi2, 1e-3)

      check(composed_chi2)
      check(native_chi2)
      check(composed_native_chi2)

  def _make_ops(self, num_samples, seed=None):
    # Build two independent sample ops over the same fixed distribution.
    prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
    logits = math_ops.log(prob_dist)
    # Two independent sets of samples from the same distribution
    sample_op1 = random_ops.multinomial(logits, num_samples, seed)
    sample_op2 = random_ops.multinomial(logits, num_samples, seed)
    return (sample_op1, sample_op2)

  def _normalize(self, vec):
    # Normalize rows (2-D) or the whole vector (1-D) to sum to one.
    batched = (len(vec.shape) == 2)
    return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()

  def _do_sampling(self, logits, num_samples, sampler):
    """Samples using the supplied sampler and inputs.

    Args:
      logits: Numpy ndarray of shape [batch_size, num_classes].
      num_samples: Int; number of samples to draw.
      sampler: A sampler function that takes (1) a [batch_size, num_classes]
        Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.

    Returns:
      Frequencies from sampled classes; shape [batch_size, num_classes].
    """
    with self.test_session(use_gpu=self.use_gpu) as sess:
      random_seed.set_random_seed(1618)
      op = sampler(constant_op.constant(logits), num_samples)
      d = sess.run(op)
    batch_size, num_classes = logits.shape
    freqs_mat = []
    for i in range(batch_size):
      cnts = dict(collections.Counter(d[i, :]))
      # Requires drawn class labels be in range.
      self.assertLess(max(cnts.keys()), num_classes)
      self.assertGreaterEqual(min(cnts.keys()), 0)
      freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
               for k in range(num_classes)]
      freqs_mat.append(freqs)
    return freqs_mat

  def _chi2(self, expected, actual):
    # Per-class chi-squared statistic between expected and observed freqs.
    actual = np.asarray(actual)
    expected = np.asarray(expected)
    diff = actual - expected
    chi2 = np.sum(diff * diff / expected, axis=0)
    return chi2

  def testEmpty(self):
    # Zero-sized batch or zero samples must yield correctly shaped empties.
    classes = 5
    with self.test_session(use_gpu=self.use_gpu):
      for batch in 0, 3:
        for samples in 0, 7:
          x = random_ops.multinomial(
              array_ops.zeros([batch, classes]), samples).eval()
          self.assertEqual(x.shape, (batch, samples))

  def testEmptyClasses(self):
    # Zero classes is invalid and must raise at op execution time.
    with self.test_session(use_gpu=self.use_gpu):
      x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
      with self.assertRaisesOpError("num_classes should be positive"):
        x.eval()

  def testNegativeMinLogits(self):
    # With all classes at float32-min except one, only that class is drawn.
    random_seed.set_random_seed(78844)
    with self.test_session(use_gpu=self.use_gpu):
      logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
      num_samples = 1000
      samples = random_ops.multinomial(logits, num_samples).eval()
      self.assertAllEqual([[1023] * num_samples], samples)
class MultinomialGpuTest(MultinomialTest):
use_gpu = True
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
np.random.seed(1618) # Make it reproducible.
shape = [batch_size, num_classes]
logits_np = np.random.randn(*shape).astype(np.float32)
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
logits = constant_op.constant(logits_np, shape=shape)
native_op = control_flow_ops.group(native_sampler(logits, num_samples))
composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
class MultinomialBenchmark(test.Benchmark):
def benchmarkNativeOpVsComposedOps(self):
num_iters = 50
print("Composition of existing ops vs. Native Multinomial op [%d iters]" %
num_iters)
print("BatchSize\tNumClasses\tNumSamples\tsec(composed)\tsec(native)\t"
"speedup")
for batch_size in [32, 128]:
for num_classes in [10000, 100000]:
for num_samples in [1, 4, 32]:
n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
num_samples, num_iters)
print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
num_samples, c_dt, n_dt,
c_dt / n_dt))
self.report_benchmark(
name="native_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=n_dt)
self.report_benchmark(
name="composed_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=c_dt)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.util.reflection;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import static org.mockito.BDDMockito.given;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Map;
import java.util.Observer;
import java.util.Set;
import org.junit.After;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.exceptions.base.MockitoException;
import org.mockito.internal.util.reflection.FieldInitializer.ConstructorArgumentResolver;
import org.mockito.internal.util.reflection.FieldInitializer.ParameterizedConstructorInstantiator;
import org.mockito.junit.MockitoJUnitRunner;
@SuppressWarnings("unchecked")
@RunWith(MockitoJUnitRunner.class)
public class ParameterizedConstructorInstantiatorTest {
private Set<?> whateverForNow;
private OneConstructor withOneConstructor;
private MultipleConstructor withMultipleConstructor;
private NoArgConstructor withNoArgConstructor;
private ThrowingConstructor withThrowingConstructor;
private VarargConstructor withVarargConstructor;
@After
public void ensure_instances_to_create_are_null() {
withMultipleConstructor = null;
withOneConstructor = null;
withNoArgConstructor = null;
withThrowingConstructor = null;
withVarargConstructor = null;
}
@Mock private ConstructorArgumentResolver resolver;
@Test
public void should_be_created_with_an_argument_resolver() throws Exception {
new ParameterizedConstructorInstantiator(this, field("whateverForNow"), resolver);
}
@Test
public void
should_fail_if_no_parameterized_constructor_found___excluding_inner_and_others_kind_of_types()
throws Exception {
try {
new ParameterizedConstructorInstantiator(this, field("withNoArgConstructor"), resolver)
.instantiate();
fail();
} catch (MockitoException me) {
assertThat(me.getMessage())
.contains("no parameterized constructor")
.contains("withNoArgConstructor")
.contains("NoArgConstructor");
}
}
@Test
public void should_instantiate_type_if_resolver_provide_matching_types() throws Exception {
Observer observer = mock(Observer.class);
Map map = mock(Map.class);
given(resolver.resolveTypeInstances(ArgumentMatchers.any(Class[].class)))
.willReturn(new Object[] {observer, map});
new ParameterizedConstructorInstantiator(this, field("withMultipleConstructor"), resolver)
.instantiate();
assertNotNull(withMultipleConstructor);
assertNotNull(withMultipleConstructor.observer);
assertNotNull(withMultipleConstructor.map);
}
@Test
public void should_fail_if_an_argument_instance_type_do_not_match_wanted_type()
throws Exception {
Observer observer = mock(Observer.class);
Set<?> wrongArg = mock(Set.class);
given(resolver.resolveTypeInstances(ArgumentMatchers.<Class<?>[]>any()))
.willReturn(new Object[] {observer, wrongArg});
try {
new ParameterizedConstructorInstantiator(
this, field("withMultipleConstructor"), resolver)
.instantiate();
fail();
} catch (MockitoException e) {
assertThat(e.getMessage()).contains("argResolver").contains("incorrect types");
}
}
@Test
public void should_report_failure_if_constructor_throws_exception() throws Exception {
given(resolver.resolveTypeInstances(ArgumentMatchers.<Class<?>[]>any()))
.willReturn(new Object[] {null});
try {
new ParameterizedConstructorInstantiator(
this, field("withThrowingConstructor"), resolver)
.instantiate();
fail();
} catch (MockitoException e) {
assertThat(e.getMessage()).contains("constructor").contains("raised an exception");
}
}
@Test
public void should_instantiate_type_with_vararg_constructor() throws Exception {
Observer[] vararg = new Observer[] {};
given(resolver.resolveTypeInstances(ArgumentMatchers.any(Class[].class)))
.willReturn(new Object[] {"", vararg});
new ParameterizedConstructorInstantiator(this, field("withVarargConstructor"), resolver)
.instantiate();
assertNotNull(withVarargConstructor);
}
private Field field(String fieldName) throws NoSuchFieldException {
Field field = this.getClass().getDeclaredField(fieldName);
field.setAccessible(true);
return field;
}
private static class NoArgConstructor {
NoArgConstructor() {}
}
private static class OneConstructor {
public OneConstructor(Observer observer) {}
}
private static class ThrowingConstructor {
public ThrowingConstructor(Observer observer) throws IOException {
throw new IOException();
}
}
private static class MultipleConstructor extends OneConstructor {
Observer observer;
Map map;
public MultipleConstructor(Observer observer) {
this(observer, null);
}
public MultipleConstructor(Observer observer, Map map) {
super(observer);
this.observer = observer;
this.map = map;
}
}
private static class VarargConstructor {
VarargConstructor(String whatever, Observer... observers) {}
}
} | java | github | https://github.com/mockito/mockito | mockito-core/src/test/java/org/mockito/internal/util/reflection/ParameterizedConstructorInstantiatorTest.java |
# Copyright (C) 2016-2019 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
from typing import Dict, Iterable, Optional, Tuple
from repology.logger import Logger
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.maintainers import extract_maintainers
from repology.transformer import PackageTransformer
def _iter_packages(path: str) -> Iterable[Tuple[str, str]]:
for category in os.listdir(path):
if category.startswith('.'):
continue
category_path = os.path.join(path, category)
if not os.path.isdir(category_path):
continue
for package in os.listdir(category_path):
package_path = os.path.join(category_path, package)
if not os.path.isdir(package_path):
continue
yield category, package
def _parse_infofile(path: str) -> Dict[str, str]:
variables: Dict[str, str] = {}
with open(path, encoding='utf-8', errors='ignore') as infofile:
key: Optional[str] = None
total_value = []
for line in infofile:
line = line.strip()
if not line:
continue
if key: # continued
value = line
else: # new variable
key, value = line.split('=', 1)
value = value.lstrip('"').lstrip()
if value.endswith('\\'): # will continue
total_value.append(value.rstrip('\\').rstrip())
elif not value or value.endswith('"'):
total_value.append(value.rstrip('"').rstrip())
variables[key] = ' '.join(total_value)
key = None
total_value = []
return variables
class SlackBuildsParser(Parser):
def iter_parse(self, path: str, factory: PackageFactory, transformer: PackageTransformer) -> Iterable[PackageMaker]:
for category, pkgname in _iter_packages(path):
with factory.begin(category + '/' + pkgname) as pkg:
info_path = os.path.join(path, category, pkgname, pkgname + '.info')
if not os.path.isfile(info_path):
pkg.log('.info file does not exist', severity=Logger.ERROR)
continue
pkg.add_categories(category)
variables = _parse_infofile(info_path)
if variables['PRGNAM'] != pkgname:
pkg.log(f'PRGNAM "{variables["PRGNAM"]}" != pkgname "{pkgname}"', severity=Logger.ERROR)
continue
pkg.add_name(variables['PRGNAM'], NameType.SLACKBUILDS_NAME)
pkg.add_name(category + '/' + pkgname, NameType.SLACKBUILDS_FULL_NAME)
pkg.set_version(variables['VERSION'])
pkg.add_homepages(variables['HOMEPAGE'])
pkg.add_maintainers(extract_maintainers(variables['EMAIL']))
for key in ['DOWNLOAD', 'DOWNLOAD_x86_64']:
if variables[key] not in ['', 'UNSUPPORTED', 'UNTESTED']:
pkg.add_downloads(variables[key].split())
yield pkg | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
class StockInformation(models.Model):
_inherit = 'stock.information'
@api.multi
def _compute_week(self):
super(StockInformation, self)._compute_week()
p_obj = self.env['procurement.order']
move_obj = self.env['stock.move']
for line in self:
if line.first_week:
moves = move_obj._find_moves_from_stock_information(
line.company, line.last_day_week,
products=[line.product.id], location_id=line.location,
periods=False)
else:
moves = move_obj._find_moves_from_stock_information(
line.company, line.last_day_week,
products=[line.product.id], from_date=line.first_day_week,
location_id=line.location, periods=False)
line.outgoing_pending_amount = sum(moves.mapped('product_uom_qty'))
line.outgoing_pending_moves = [(6, 0, moves.ids)]
states = ['confirmed', 'exception']
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=False, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=False,
without_plan=False)
line.outgoing_pending_amount_reserv = sum(
procurements.mapped('product_qty'))
line.outgoing_pending_procurement_reserv = (
[(6, 0, procurements.ids)])
line.outgoing_pending_amount_moves = line.outgoing_pending_amount
line.outgoing_pending_amount += line.outgoing_pending_amount_reserv
states = ['confirmed', 'exception']
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=True, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=True,
without_plan=False)
line.incoming_pending_amount_plan = sum(
procurements.mapped('product_qty'))
line.incoming_pending_procurements_plan = (
[(6, 0, procurements.ids)])
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=False, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=False,
without_plan=False)
line.incoming_pending_amount_plan_reservation = sum(
procurements.mapped('product_qty'))
line.incoming_pending_procurements_plan_reservation = (
[(6, 0, procurements.ids)])
line.incoming_pending_amount += (
line.incoming_pending_amount_plan +
line.incoming_pending_amount_plan_reservation)
line.stock_availability = (line.qty_available - line.minimum_rule +
line.incoming_pending_amount)
if line.stock_availability >= line.outgoing_pending_amount:
line.virtual_stock = 0
else:
line.virtual_stock = (line.outgoing_pending_amount -
line.stock_availability)
incoming_pending_amount_plan = fields.Float(
'Incoming pending amount from plan', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Incoming from plan')
incoming_pending_amount_plan_required_run = fields.Float(
'Incoming from plan required run',
related='incoming_pending_amount_plan',
digits=dp.get_precision('Product Unit of Measure'), store=True)
incoming_pending_procurements_plan = fields.Many2many(
comodel_name='procurement.order',
string='Incoming pending procurements from plan',
relation='rel_stock_info_incoming_pending_procurement_plan',
column1='stock_info_id', column2='pending_procurement_plan_id',
compute='_compute_week')
incoming_pending_amount_plan_reservation = fields.Float(
'Incoming pending amount from plan reservation',
digits=dp.get_precision('Product Unit of Measure'),
compute='_compute_week', help='Incoming from plan reservation')
incoming_pending_amount_plan_reserv_required_run = fields.Float(
'Incoming from plan reserv required run',
related='incoming_pending_amount_plan_reservation',
digits=dp.get_precision('Product Unit of Measure'), store=True)
incoming_pending_procurements_plan_reservation = fields.Many2many(
comodel_name='procurement.order',
string='Incoming pending procurements from plan reservation',
relation='rel_stock_info_incoming_pending_procurement_plan_reserv',
column1='stock_info_id', column2='pending_procurement_plan_id',
compute='_compute_week')
outgoing_pending_amount_moves = fields.Float(
'Outgoing pending amount from moves', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Gross requirement')
outgoing_pending_amount_reserv = fields.Float(
'Outgoing pending amount reservation', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Gross requirement reservation')
outgoing_pending_procurement_reserv = fields.Many2many(
comodel_name='procurement.order',
string='Outgoing pending procurements reservation',
relation='rel_stock_info_outgoing_pending_procurement_reserv',
column1='stock_info_id', column2='pending_procurement_reserv_id',
compute='_compute_week')
@api.multi
def show_outgoing_pending_reserved_moves(self):
self.ensure_one()
return {'name': _('Outgoing pending reserved procurements'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'procurement.order',
'type': 'ir.actions.act_window',
'domain': [('id', 'in',
self.outgoing_pending_procurement_reserv.ids)]}
@api.multi
def show_incoming_procurements_from_plan(self):
self.ensure_one()
return {'name': _('Incoming procurements from plan'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'procurement.order',
'type': 'ir.actions.act_window',
'domain': [('id', 'in',
self.incoming_pending_procurements_plan.ids)]}
@api.multi
def show_incoming_procurements_from_plan_reservation(self):
self.ensure_one()
ids = self.incoming_pending_procurements_plan_reservation.ids
return {'name': _('Incoming procurements from plan reservation'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'procurement.order',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', ids)]} | unknown | codeparrot/codeparrot-clean | ||
use std::{collections::BTreeSet, sync::LazyLock};
use anyhow::{Context, Result};
use regex::Regex;
use serde::Serialize;
use turbo_esregex::EsRegex;
use turbo_rcstr::{RcStr, rcstr};
use turbo_tasks::{ResolvedVc, Vc};
use turbo_tasks_fs::{self, FileSystemEntryType, FileSystemPath, to_sys_path};
use turbopack::module_options::{ConditionItem, LoaderRuleItem};
use turbopack_core::{
issue::{Issue, IssueExt, IssueSeverity, IssueStage, OptionStyledString, StyledString},
reference_type::{CommonJsReferenceSubType, ReferenceType},
resolve::{node::node_cjs_resolve_options, parse::Request, pattern::Pattern, resolve},
source::Source,
};
use turbopack_node::transforms::webpack::WebpackLoaderItem;
use crate::{
next_config::{NextConfig, ReactCompilerCompilationMode, ReactCompilerOptions},
next_import_map::try_get_next_package,
next_shared::webpack_rules::{
ManuallyConfiguredBuiltinLoaderIssue, WebpackLoaderBuiltinCondition,
},
};
// https://babeljs.io/docs/config-files
// TODO: Also support a `babel` key in a package.json file
const BABEL_CONFIG_FILES: &[&str] = &[
".babelrc",
".babelrc.json",
".babelrc.js",
".babelrc.mjs",
".babelrc.cjs",
"babel.config.js",
"babel.config.json",
"babel.config.mjs",
"babel.config.cjs",
];
static BABEL_LOADER_RE: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"(^|/)@?babel[-/]loader($|/|\.)").unwrap());
/// The forked version of babel-loader that we should use for automatic configuration. This version
/// is always available, as it's installed as part of next.js.
const NEXT_JS_BABEL_LOADER: &str = "next/dist/build/babel/loader";
const BABEL_PLUGIN_REACT_COMPILER: &str = "babel-plugin-react-compiler";
const BABEL_PLUGIN_REACT_COMPILER_PACKAGE_JSON: &str = "babel-plugin-react-compiler/package.json";
/// Detect manually-configured babel loaders. This is used to generate a warning, suggesting using
/// the built-in babel support.
async fn detect_likely_babel_loader(
webpack_rules: &[(RcStr, LoaderRuleItem)],
) -> Result<Option<RcStr>> {
for (glob, rule) in webpack_rules {
if rule
.loaders
.await?
.iter()
.any(|item| BABEL_LOADER_RE.is_match(&item.loader))
{
return Ok(Some(glob.clone()));
}
}
Ok(None)
}
/// If the user has a babel configuration file (see list above) alongside their `next.config.js`
/// configuration, automatically add `babel-loader` as a webpack loader for each eligible file type
/// if it doesn't already exist.
pub async fn get_babel_loader_rules(
project_path: &FileSystemPath,
next_config: Vc<NextConfig>,
builtin_conditions: &BTreeSet<WebpackLoaderBuiltinCondition>,
user_webpack_rules: &[(RcStr, LoaderRuleItem)],
) -> Result<Vec<(RcStr, LoaderRuleItem)>> {
// We never run babel over foreign code, under the assumption that `node_modules` code should
// not require any transforms that SWC does not provide. If somebody really needs this, they can
// manually configure a babel loader.
if builtin_conditions.contains(&WebpackLoaderBuiltinCondition::Foreign) {
return Ok(Vec::new());
}
let use_builtin_babel = next_config
.experimental_turbopack_use_builtin_babel()
.await?;
if use_builtin_babel.is_none()
&& let Some(glob) = detect_likely_babel_loader(user_webpack_rules).await?
{
ManuallyConfiguredBuiltinLoaderIssue {
glob,
loader: rcstr!("babel-loader"),
config_key: rcstr!("experimental.turbopackUseBuiltinBabel"),
config_file_path: next_config
.config_file_path(project_path.clone())
.owned()
.await?,
}
.resolved_cell()
.emit()
}
let mut babel_config_path = None;
if use_builtin_babel.unwrap_or(true) {
for &filename in BABEL_CONFIG_FILES {
let path = project_path.join(filename)?;
let filetype = *path.get_type().await?;
if matches!(filetype, FileSystemEntryType::File) {
babel_config_path = Some(path);
break;
}
}
}
let react_compiler_options = next_config.react_compiler_options().await?;
// if there's no babel config and react-compiler shouldn't be enabled, bail out early
if babel_config_path.is_none()
&& (react_compiler_options.is_none()
|| !builtin_conditions.contains(&WebpackLoaderBuiltinCondition::Browser))
{
return Ok(Vec::new());
}
// - See `packages/next/src/build/babel/loader/types.d.ts` for all the configuration options.
// - See `packages/next/src/build/get-babel-loader-config.ts` for how we use this in webpack.
let serde_json::Value::Object(mut loader_options) = serde_json::json!({
// `transformMode: default` (what the webpack implementation does) would run all of the
// Next.js-specific transforms as babel transforms. Because we always have to pay the cost
// of parsing with SWC after the webpack loader runs, we want to keep running those
// transforms using SWC, so use `standalone` instead.
"transformMode": "standalone",
"cwd": to_sys_path_str(project_path.clone()).await?,
"isServer": !builtin_conditions.contains(&WebpackLoaderBuiltinCondition::Browser),
}) else {
unreachable!("is an object")
};
if let Some(babel_config_path) = &babel_config_path {
loader_options.insert(
"configFile".to_owned(),
to_sys_path_str(babel_config_path.clone()).await?.into(),
);
}
let mut loader_conditions = Vec::new();
if let Some(react_compiler_options) = react_compiler_options.as_ref()
&& let Some(babel_plugin_path) =
resolve_babel_plugin_react_compiler(next_config, project_path).await?
{
let react_compiler_options = react_compiler_options.await?;
// we don't want to accept user-supplied `environment` options, but we do want to pass
// `enableNameAnonymousFunctions` down to the babel plugin based on dev/prod.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct EnvironmentOptions {
enable_name_anonymous_functions: bool,
}
#[derive(Serialize)]
struct ResolvedOptions<'a> {
#[serde(flatten)]
base: &'a ReactCompilerOptions,
environment: EnvironmentOptions,
}
let resolved_options = ResolvedOptions {
base: &react_compiler_options,
environment: EnvironmentOptions {
enable_name_anonymous_functions: builtin_conditions
.contains(&WebpackLoaderBuiltinCondition::Development),
},
};
let react_compiler_plugins =
serde_json::Value::Array(vec![serde_json::Value::Array(vec![
serde_json::Value::String(babel_plugin_path.into_owned()),
serde_json::to_value(resolved_options)
.expect("react compiler options JSON serialization should never fail"),
])]);
loader_options.insert("reactCompilerPlugins".to_owned(), react_compiler_plugins);
if babel_config_path.is_none() {
// We're only running react-compiler, so add some extra conditions to limit when babel
// runs for performance reasons
//
// NOTE: we already bail out at the earlier if `foreign` condition is set or if
// `browser` is not set.
match react_compiler_options.compilation_mode {
ReactCompilerCompilationMode::Annotation => {
loader_conditions.push(ConditionItem::Base {
path: None,
content: Some(
EsRegex::new(r#"['"]use memo['"]"#, "")
.expect("valid const regex")
.resolved_cell(),
),
query: None,
content_type: None,
});
}
ReactCompilerCompilationMode::Infer => {
loader_conditions.push(ConditionItem::Base {
path: None,
// Matches declaration or useXXX or </ (closing jsx) or /> (self closing
// jsx)
content: Some(
EsRegex::new(r#"['"]use memo['"]|\Wuse[A-Z]|<\/|\/>"#, "")
.expect("valid const regex")
.resolved_cell(),
),
query: None,
content_type: None,
});
}
ReactCompilerCompilationMode::All => {}
}
}
}
Ok(vec![(
rcstr!("*.{js,jsx,ts,tsx,cjs,mjs,mts,cts}"),
LoaderRuleItem {
loaders: ResolvedVc::cell(vec![WebpackLoaderItem {
loader: rcstr!(NEXT_JS_BABEL_LOADER),
options: loader_options,
}]),
rename_as: Some(rcstr!("*")),
condition: Some(ConditionItem::All(loader_conditions.into())),
module_type: None,
},
)])
}
/// A system path that can be passed to the webpack loader
async fn to_sys_path_str(path: FileSystemPath) -> Result<String> {
let sys_path = to_sys_path(path)
.await?
.context("path should use a DiskFileSystem")?;
Ok(sys_path
.to_str()
.with_context(|| format!("{sys_path:?} is not valid utf-8"))?
.to_owned())
}
/// Resolve `babel-plugin-react-compiler` relative to `next`. This matches the behavior of the
/// webpack implementation, which resolves the Babel plugin from within `next`. The Babel plugin is
/// an optional peer dependency of `next`.
///
/// The returned path is relative to `project_path`. `project_path` should be the value given to
/// `babel-loader` using the `cwd` option.
pub async fn resolve_babel_plugin_react_compiler(
next_config: Vc<NextConfig>,
project_path: &FileSystemPath,
) -> Result<Option<RcStr>> {
let Some(next_package) = &*try_get_next_package(project_path.clone()).await? else {
BabelPluginReactCompilerResolutionIssue {
failed_resolution: rcstr!("next"),
config_file_path: next_config
.config_file_path(project_path.clone())
.owned()
.await?,
}
.resolved_cell()
.emit();
return Ok(None);
};
let babel_plugin_result = resolve(
next_package.clone(),
ReferenceType::CommonJs(CommonJsReferenceSubType::Undefined),
Request::parse(Pattern::Constant(rcstr!(
BABEL_PLUGIN_REACT_COMPILER_PACKAGE_JSON
))),
node_cjs_resolve_options(project_path.root().owned().await?),
);
let Some(source) = &*babel_plugin_result.first_source().await? else {
BabelPluginReactCompilerResolutionIssue {
failed_resolution: rcstr!(BABEL_PLUGIN_REACT_COMPILER),
config_file_path: next_config
.config_file_path(project_path.clone())
.owned()
.await?,
}
.resolved_cell()
.emit();
return Ok(None);
};
Ok(Some(
// the relative path should only ever fail to resolve when the `fs` is different, which
// should only happen due to eventual consistency.
project_path
.get_relative_path_to(&source.ident().path().await?.parent())
.context("failed to resolve relative path for react compiler plugin")?,
))
}
#[turbo_tasks::value]
struct BabelPluginReactCompilerResolutionIssue {
failed_resolution: RcStr,
config_file_path: FileSystemPath,
}
#[turbo_tasks::value_impl]
impl Issue for BabelPluginReactCompilerResolutionIssue {
#[turbo_tasks::function]
fn stage(&self) -> Vc<IssueStage> {
IssueStage::Transform.cell()
}
fn severity(&self) -> IssueSeverity {
IssueSeverity::Error
}
#[turbo_tasks::function]
fn file_path(&self) -> Vc<FileSystemPath> {
self.config_file_path.clone().cell()
}
#[turbo_tasks::function]
fn title(&self) -> Vc<StyledString> {
StyledString::Line(vec![
StyledString::Text(rcstr!("Failed to resolve package ")),
StyledString::Code(self.failed_resolution.clone()),
StyledString::Text(rcstr!(" while attempting to resolve React Compiler")),
])
.cell()
}
#[turbo_tasks::function]
fn description(&self) -> Vc<OptionStyledString> {
Vc::cell(Some(
StyledString::Line(vec![
StyledString::Text(rcstr!("React compiler is enabled in ")),
StyledString::Code(self.config_file_path.path.clone()),
StyledString::Text(rcstr!(
". We attempted to resolve React Compiler relative to the "
)),
StyledString::Code(rcstr!("next")),
StyledString::Text(rcstr!(" package. Is ")),
StyledString::Code(rcstr!(BABEL_PLUGIN_REACT_COMPILER)),
StyledString::Text(rcstr!(" installed in your ")),
StyledString::Code(rcstr!("node_modules")),
StyledString::Text(rcstr!(" directory?")),
])
.resolved_cell(),
))
}
} | rust | github | https://github.com/vercel/next.js | crates/next-core/src/next_shared/webpack_rules/babel.rs |
# -*- coding: utf-8 -*-
"""This file contains a parser for the Android usage-history.xml file."""
import os
from xml.etree import ElementTree
from dfvfs.helpers import text_file
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
class AndroidAppUsageEvent(event.EventObject):
"""EventObject for an Android Application Last Resumed event."""
DATA_TYPE = u'android:event:last_resume_time'
def __init__(self, last_resume_time, package, component):
"""Initializes the event object.
Args:
last_resume_time: The Last Resume Time of an Android App with details of
individual components. The timestamp contains the number of
milliseconds since Jan 1, 1970 00:00:00 UTC.
package: The name of the Android App.
component: The individual component of the App.
"""
super(AndroidAppUsageEvent, self).__init__()
self.timestamp = timelib.Timestamp.FromJavaTime(last_resume_time)
self.package = package
self.component = component
self.timestamp_desc = eventdata.EventTimestamp.LAST_RESUME_TIME
class AndroidAppUsageParser(interface.SingleFileBaseParser):
"""Parses the Android usage-history.xml file."""
_INITIAL_FILE_OFFSET = None
NAME = u'android_app_usage'
DESCRIPTION = u'Parser for the Android usage-history.xml file.'
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses an Android usage-history file-like object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_object.seek(0, os.SEEK_SET)
text_file_object = text_file.TextFile(file_object)
# Need to verify the first line to make sure this is a) XML and
# b) the right XML.
first_line = text_file_object.readline(90)
# Note that we must check the data here as a string first, otherwise
# forcing first_line to convert to Unicode can raise a UnicodeDecodeError.
if not first_line.startswith(b'<?xml'):
raise errors.UnableToParseFile(
u'Not an Android usage history file [not XML]')
# We read in the second line due to the fact that ElementTree
# reads the entire file in memory to parse the XML string and
# we only care about the XML file with the correct root key,
# which denotes a typed_history.xml file.
second_line = text_file_object.readline(50).strip()
# Note that we must check the data here as a string first, otherwise
# forcing second_line to convert to Unicode can raise a UnicodeDecodeError.
if second_line != b'<usage-history>':
raise errors.UnableToParseFile(
u'Not an Android usage history file [wrong XML root key]')
# The current offset of the file-like object needs to point at
# the start of the file for ElementTree to parse the XML data correctly.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
root = xml.getroot()
for app in root:
for part in app.iter():
if part.tag == u'comp':
package = app.get(u'name', u'')
component = part.get(u'name', u'')
try:
last_resume_time = int(part.get(u'lrt', u''), 10)
except ValueError:
continue
event_object = AndroidAppUsageEvent(
last_resume_time, package, component)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(AndroidAppUsageParser) | unknown | codeparrot/codeparrot-clean | ||
# Logic copied from PEP 513
def is_manylinux1_compatible():
    """Return True if this interpreter can run manylinux1 wheels.

    Follows the PEP 513 reference implementation: check the platform tag,
    honor an explicit `_manylinux` override module if installed, and
    otherwise fall back to a glibc version heuristic.
    """
    # Only Linux, and only the architectures we build for: x86-64 / i686 per
    # PEP 513, plus s390x as a local extension.
    # sysconfig.get_platform() is the documented equivalent of
    # distutils.util.get_platform(); distutils was removed in Python 3.12.
    import sysconfig
    if sysconfig.get_platform() not in ["linux-x86_64", "linux-i686", "linux-s390x"]:
        return False

    # A distro-provided `_manylinux` module may explicitly declare
    # (in)compatibility; it takes precedence over the heuristic.
    try:
        import _manylinux
        return bool(_manylinux.manylinux1_compatible)
    except (ImportError, AttributeError):
        # Fall through to heuristic check below
        pass

    # Check glibc version. CentOS 5 (the manylinux1 baseline) uses glibc 2.5.
    return have_compatible_glibc(2, 5)
def have_compatible_glibc(major, minimum_minor):
    """Return True if the process is linked against glibc >= major.minimum_minor.

    Args:
        major: required glibc major version (must match exactly).
        minimum_minor: minimum acceptable glibc minor version.

    Returns:
        bool: False when not linked against glibc at all, when the major
        version differs, or when the minor version is too old.
    """
    import ctypes

    process_namespace = ctypes.CDLL(None)
    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return False

    # Call gnu_get_libc_version, which returns a string like "2.5".
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    # Parse the leading "major.minor" pair. Real-world glibc version strings
    # are not always exactly two numeric components (e.g. "2.3.4"), so match
    # a prefix instead of asserting on split(".") -- this mirrors the final
    # PEP 513 reference implementation.
    import re
    match = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if match is None:
        raise AssertionError(
            f"Expected version in major.minor form, got: {version_str}"
        )
    if major != int(match.group("major")):
        return False
    if minimum_minor > int(match.group("minor")):
        return False
    return True
import sys

# Report the result on stdout and signal it through the exit status so the
# check can be used from shell scripts.
if is_manylinux1_compatible():
    print(f"{sys.executable} is manylinux1 compatible")
    sys.exit(0)

print(f"{sys.executable} is NOT manylinux1 compatible")
sys.exit(1)
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
WARNING! Mock objects created by Mox are not thread-safe. If you
call a mock from multiple threads, it should be guarded by a mutex.
TODO(stevepm): Add the option to make mocks thread-safe!
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import difflib
import inspect
import re
import types
import unittest
import stubout
class Error(AssertionError):
    """Base exception for this module.

    Derives from AssertionError so mox failures integrate with test
    frameworks that treat assertion failures specially.
    """
class ExpectedMethodCallsError(Error):
    """Raised when Verify() runs while expected methods are still pending."""

    def __init__(self, expected_methods):
        """Initialize the error.

        Args:
          expected_methods: sequence of MockMethod objects that should have
              been called but were not.

        Raises:
          ValueError: if expected_methods contains no methods.
        """
        if not expected_methods:
            raise ValueError("There must be at least one expected method")
        Error.__init__(self)
        self._expected_methods = expected_methods

    def __str__(self):
        lines = ["%3d. %s" % (index, method)
                 for index, method in enumerate(self._expected_methods)]
        return "Verify: Expected methods never called:\n%s" % ("\n".join(lines),)
class UnexpectedMethodCallError(Error):
    """Raised when an unexpected method is called.

    This happens when a method is called with incorrect parameters or out of
    the specified order.
    """

    def __init__(self, unexpected_method, expected):
        """Initialize the error.

        Args:
          unexpected_method: the MockMethod that was called but was not at
              the head of the expected-method queue.
          expected: the MockMethod or UnorderedGroup the call should have
              matched, or None when nothing more was expected.
        """
        Error.__init__(self)
        if expected is None:
            self._str = "Unexpected method call %s" % (unexpected_method,)
            return
        # Render a unified diff of the two call descriptions so the mismatch
        # is easy to spot.
        diff_lines = difflib.Differ().compare(
            str(unexpected_method).splitlines(True),
            str(expected).splitlines(True))
        self._str = ("Unexpected method call. unexpected:- expected:+\n%s"
                     % ("\n".join(diff_lines),))

    def __str__(self):
        return self._str
class UnknownMethodCallError(Error):
    """Raised if an unknown method is requested of the mock object."""

    def __init__(self, unknown_method_name):
        """Initialize the error.

        Args:
          unknown_method_name: str. Name of the requested call that is not
              part of the mocked class's public interface.
        """
        Error.__init__(self)
        self._unknown_method_name = unknown_method_name

    def __str__(self):
        return ("Method called is not a member of the object: %s"
                % self._unknown_method_name)
class PrivateAttributeError(Error):
    """Raised if a MockObject is passed a private additional attribute name."""

    def __init__(self, attr):
        """Initialize the error.

        Args:
          attr: str. The offending private attribute name.
        """
        Error.__init__(self)
        self._attr = attr

    def __str__(self):
        # Bug fix: this previously referenced the bare name `attr`, which does
        # not exist in __str__'s scope, so rendering the message raised a
        # NameError instead of the intended text.
        return ("Attribute '%s' is private and should not be available in a "
                "mock object." % self._attr)
class ExpectedMockCreationError(Error):
    """Raised if mocks should have been created by StubOutClassWithMocks."""

    def __init__(self, expected_mocks):
        """Initialize the error.

        Args:
          expected_mocks: sequence of MockObjects that should have been
              created but were not.

        Raises:
          ValueError: if expected_mocks is empty.
        """
        if not expected_mocks:
            raise ValueError("There must be at least one expected method")
        Error.__init__(self)
        self._expected_mocks = expected_mocks

    def __str__(self):
        listing = "\n".join("%3d. %s" % (index, mock)
                            for index, mock in enumerate(self._expected_mocks))
        return "Verify: Expected mocks never created:\n%s" % (listing,)
class UnexpectedMockCreationError(Error):
    """Raised if too many mocks were created by StubOutClassWithMocks."""

    def __init__(self, instance, *params, **named_params):
        """Initialize the error.

        Args:
          instance: the type of object that was created.
          params: positional parameters given during instantiation.
          named_params: named parameters given during instantiation.
        """
        Error.__init__(self)
        self._instance = instance
        self._params = params
        self._named_params = named_params

    def __str__(self):
        # The original enumerated the params only to discard the index; plain
        # iteration is equivalent and clearer.
        args = ", ".join("%s" % value for value in self._params)
        error = "Unexpected mock creation: %s(%s" % (self._instance, args)

        if self._named_params:
            # dict.items() works on both Python 2 and 3; the previous
            # iteritems() call crashed message rendering on Python 3.
            error += ", " + ", ".join("%s=%s" % (key, value) for key, value in
                                      self._named_params.items())

        error += ")"
        return error
class Mox(object):
    """Mox: a factory for creating mock objects."""

    # A list of types that should be stubbed out with MockObjects (as
    # opposed to MockAnythings).
    # NOTE: ClassType, InstanceType, ObjectType, TypeType and
    # UnboundMethodType exist only on Python 2; this module is Python 2 code.
    _USE_MOCK_OBJECT = [types.ClassType, types.FunctionType, types.InstanceType,
                        types.ModuleType, types.ObjectType, types.TypeType,
                        types.MethodType, types.UnboundMethodType,
                        ]

    # A list of types that may be stubbed out with a MockObjectFactory.
    _USE_MOCK_FACTORY = [types.ClassType, types.ObjectType, types.TypeType]

    def __init__(self):
        """Initialize a new Mox."""
        # All mocks created through this factory, so ReplayAll/VerifyAll/
        # ResetAll can operate on them in one sweep.
        self._mock_objects = []
        # Stub manager used by StubOutWithMock / StubOutClassWithMocks.
        self.stubs = stubout.StubOutForTesting()

    def CreateMock(self, class_to_mock, attrs={}):
        """Create a new mock object.

        Args:
          class_to_mock: class. The class to be mocked.
          attrs: dict of attribute names to values that will be set on the
              mock object. Only public attributes may be set.
              NOTE: the mutable default is only ever iterated, never
              mutated, so the shared-default pitfall does not apply here.

        Returns:
          MockObject that can be used as the class_to_mock would be.
        """
        new_mock = MockObject(class_to_mock, attrs=attrs)
        self._mock_objects.append(new_mock)
        return new_mock

    def CreateMockAnything(self, description=None):
        """Create a mock that will accept any method calls.

        This does not enforce an interface.

        Args:
          description: str. Optionally, a descriptive name for the mock
              object being created, for debugging output purposes.
        """
        new_mock = MockAnything(description=description)
        self._mock_objects.append(new_mock)
        return new_mock

    def ReplayAll(self):
        """Set all mock objects to replay mode."""
        for mock_obj in self._mock_objects:
            mock_obj._Replay()

    def VerifyAll(self):
        """Call verify on all mock objects created."""
        for mock_obj in self._mock_objects:
            mock_obj._Verify()

    def ResetAll(self):
        """Call reset on all mock objects.  This does not unset stubs."""
        for mock_obj in self._mock_objects:
            mock_obj._Reset()

    def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
        """Replace a method, attribute, etc. with a Mock.

        This will replace a class or module with a MockObject, and everything
        else (method, function, etc) with a MockAnything. This can be
        overridden to always use a MockAnything by setting use_mock_anything
        to True.

        Args:
          obj: A Python object (class, module, instance, callable).
          attr_name: str. The name of the attribute to replace with a mock.
          use_mock_anything: bool. True if a MockAnything should be used
              regardless of the type of attribute.
        """
        attr_to_replace = getattr(obj, attr_name)
        attr_type = type(attr_to_replace)

        # Stubbing an attribute that is already a mock almost always means a
        # previous test forgot to call UnsetStubs; fail loudly.
        if attr_type == MockAnything or attr_type == MockObject:
            raise TypeError('Cannot mock a MockAnything! Did you remember to '
                            'call UnsetStubs in your previous test?')

        if attr_type in self._USE_MOCK_OBJECT and not use_mock_anything:
            stub = self.CreateMock(attr_to_replace)
        else:
            stub = self.CreateMockAnything(description='Stub for %s' % attr_to_replace)

        self.stubs.Set(obj, attr_name, stub)

    def StubOutClassWithMocks(self, obj, attr_name):
        """Replace a class with a "mock factory" that will create mock objects.

        This is useful if the code-under-test directly instantiates
        dependencies.  Previously some boiler plate was necessary to
        create a mock that would act as a factory.  Using
        StubOutClassWithMocks, once you've stubbed out the class you may
        use the stubbed class as you would any other mock created by mox:
        during the record phase, new mock instances will be created, and
        during replay, the recorded mocks will be returned.

        In replay mode

        # Example using StubOutWithMock (the old, clunky way):

        mock1 = mox.CreateMock(my_import.FooClass)
        mock2 = mox.CreateMock(my_import.FooClass)
        foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
                                          use_mock_anything=True)
        foo_factory(1, 2).AndReturn(mock1)
        foo_factory(9, 10).AndReturn(mock2)
        mox.ReplayAll()

        my_import.FooClass(1, 2)   # Returns mock1 again.
        my_import.FooClass(9, 10)  # Returns mock2 again.
        mox.VerifyAll()

        # Example using StubOutClassWithMocks:

        mox.StubOutClassWithMocks(my_import, 'FooClass')
        mock1 = my_import.FooClass(1, 2)   # Returns a new mock of FooClass
        mock2 = my_import.FooClass(9, 10)  # Returns another mock instance
        mox.ReplayAll()

        my_import.FooClass(1, 2)   # Returns mock1 again.
        my_import.FooClass(9, 10)  # Returns mock2 again.
        mox.VerifyAll()
        """
        attr_to_replace = getattr(obj, attr_name)
        attr_type = type(attr_to_replace)

        if attr_type == MockAnything or attr_type == MockObject:
            raise TypeError('Cannot mock a MockAnything! Did you remember to '
                            'call UnsetStubs in your previous test?')

        # Only class-like attributes can act as factories.
        if attr_type not in self._USE_MOCK_FACTORY:
            raise TypeError('Given attr is not a Class. Use StubOutWithMock.')

        factory = _MockObjectFactory(attr_to_replace, self)
        # The factory participates in VerifyAll so un-created mocks are caught.
        self._mock_objects.append(factory)
        self.stubs.Set(obj, attr_name, factory)

    def UnsetStubs(self):
        """Restore stubs to their original state."""
        self.stubs.UnsetAll()
def Replay(*mocks):
    """Switch every given mock into replay mode.

    Args:
      *mocks: any number of mocks to put into replay mode.
    """
    for mock_obj in mocks:
        mock_obj._Replay()
def Verify(*mocks):
    """Verify every given mock.

    Args:
      *mocks: any number of mocks to be verified.
    """
    for mock_obj in mocks:
        mock_obj._Verify()
def Reset(*mocks):
    """Reset every given mock.

    Args:
      *mocks: any number of mocks to be reset.
    """
    for mock_obj in mocks:
        mock_obj._Reset()
class MockAnything:
    """A mock that can be used to mock anything.

    This is helpful for mocking classes that do not provide a public
    interface.
    """

    def __init__(self, description=None):
        """Initialize a new MockAnything.

        Args:
          description: str. Optional descriptive name for the mock object
              being created, used in debugging output.
        """
        self._description = description
        self._Reset()

    def __repr__(self):
        if self._description:
            return '<MockAnything instance of %s>' % self._description
        return '<MockAnything instance>'

    def __getattr__(self, method_name):
        """Intercept method calls on this object.

        A new MockMethod is returned that is aware of this mock's state
        (record or replay).  The call will be recorded or replayed by the
        MockMethod's __call__.

        Args:
          method_name: str. The name of the method being called.

        Returns:
          A new MockMethod aware of this mock's state (record or replay).
        """
        return self._CreateMockMethod(method_name)

    def _CreateMockMethod(self, method_name, method_to_mock=None):
        """Create a new mock method call and return it.

        Args:
          method_name: str. The name of the method being called.
          method_to_mock: the actual method being mocked, used for
              introspection.

        Returns:
          A new MockMethod aware of this mock's state (record or replay).
        """
        return MockMethod(method_name, self._expected_calls_queue,
                          self._replay_mode, method_to_mock=method_to_mock,
                          description=self._description)

    def __nonzero__(self):
        """Return 1 so the mock is truthy in conditionals (Python 2)."""
        return 1

    def __eq__(self, rhs):
        """Mocks compare equal when mode and expectation queues match."""
        if not isinstance(rhs, MockAnything):
            return False
        return (self._replay_mode == rhs._replay_mode and
                self._expected_calls_queue == rhs._expected_calls_queue)

    def __ne__(self, rhs):
        """Inverse of __eq__."""
        return not self == rhs

    def _Replay(self):
        """Start replaying expected method calls."""
        self._replay_mode = True

    def _Verify(self):
        """Verify that all of the expected calls have been made.

        Raises:
          ExpectedMethodCallsError: if there are still more method calls in
              the expected queue.
        """
        if not self._expected_calls_queue:
            return
        # A trailing MultipleTimesGroup is never popped from the queue, so a
        # single satisfied one is not an error.
        if (len(self._expected_calls_queue) == 1 and
                isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
                self._expected_calls_queue[0].IsSatisfied()):
            return
        raise ExpectedMethodCallsError(self._expected_calls_queue)

    def _Reset(self):
        """Return to record mode with an empty expectation queue."""
        # Maintain a list of method calls we are expecting.
        self._expected_calls_queue = deque()
        # Make sure we are in setup mode, not replay mode.
        self._replay_mode = False
class MockObject(MockAnything, object):
    """A mock object that simulates the public/protected interface of a class."""

    def __init__(self, class_to_mock, attrs={}):
        """Initialize a mock object.

        This determines the methods and properties of the class and stores
        them.

        Args:
          class_to_mock: class. The class to be mocked.
          attrs: dict of attribute names to values that will be set on the
              mock object. Only public attributes may be set. NOTE: the
              mutable default is only iterated, never mutated, so the
              shared-default pitfall does not apply here.

        Raises:
          PrivateAttributeError: if a supplied attribute is not public.
          ValueError: if an attribute would mask an existing method.
        """
        # This is used to hack around the mixin/inheritance of MockAnything,
        # which is not a proper object (it can be anything. :-)
        MockAnything.__dict__['__init__'](self)

        # Get a list of all the public and special methods we should mock.
        self._known_methods = set()
        self._known_vars = set()
        self._class_to_mock = class_to_mock
        try:
            self._description = class_to_mock.__name__
            # If class_to_mock is a mock itself, then we'll get an
            # UnknownMethodCallError here from the underlying call to
            # __getattr__('__name__').
        except (UnknownMethodCallError, AttributeError):
            try:
                self._description = type(class_to_mock).__name__
            except AttributeError:
                pass

        for method in dir(class_to_mock):
            attr = getattr(class_to_mock, method)
            if callable(attr):
                self._known_methods.add(method)
            elif not (type(attr) is property):
                # treating properties as class vars makes little sense.
                self._known_vars.add(method)

        # Set additional attributes at instantiation time; this is quicker
        # than manually setting attributes that are normally created in
        # __init__.
        for attr, value in attrs.items():
            if attr.startswith("_"):
                raise PrivateAttributeError(attr)
            elif attr in self._known_methods:
                raise ValueError("'%s' is a method of '%s' objects." % (attr,
                                 class_to_mock))
            else:
                setattr(self, attr, value)

    def __getattr__(self, name):
        """Intercept attribute request on this object.

        If the attribute is a public class variable, it will be returned and
        not recorded as a call.

        If the attribute is not a variable, it is handled like a method call.
        The method name is checked against the set of mockable methods, and a
        new MockMethod is returned that is aware of the MockObject's state
        (record or replay).  The call will be recorded or replayed by the
        MockMethod's __call__.

        Args:
          name: str. The name of the attribute being requested.

        Returns:
          Either a class variable or a new MockMethod that is aware of the
          state of the mock (record or replay).

        Raises:
          UnknownMethodCallError if the MockObject does not mock the requested
              method.
        """
        if name in self._known_vars:
            return getattr(self._class_to_mock, name)

        if name in self._known_methods:
            return self._CreateMockMethod(
                name,
                method_to_mock=getattr(self._class_to_mock, name))

        raise UnknownMethodCallError(name)

    def __eq__(self, rhs):
        """Provide custom logic to compare objects."""
        return (isinstance(rhs, MockObject) and
                self._class_to_mock == rhs._class_to_mock and
                self._replay_mode == rhs._replay_mode and
                self._expected_calls_queue == rhs._expected_calls_queue)

    def __setitem__(self, key, value):
        """Provide custom logic for mocking classes that support item
        assignment.

        Args:
          key: Key to set the value for.
          value: Value to set.

        Returns:
          Expected return value in replay mode.  A MockMethod object for the
          __setitem__ method that has already been called if not in replay
          mode.

        Raises:
          TypeError if the underlying class does not support item assignment.
          UnexpectedMethodCallError if the object does not expect the call to
              __setitem__.
        """
        # Verify the class supports item assignment.
        if '__setitem__' not in dir(self._class_to_mock):
            raise TypeError('object does not support item assignment')

        # If we are in replay mode then simply call the mock __setitem__
        # method.
        if self._replay_mode:
            return MockMethod('__setitem__', self._expected_calls_queue,
                              self._replay_mode)(key, value)

        # Otherwise, create a mock method __setitem__.
        return self._CreateMockMethod('__setitem__')(key, value)

    def __getitem__(self, key):
        """Provide custom logic for mocking classes that are subscriptable.

        Args:
          key: Key to return the value for.

        Returns:
          Expected return value in replay mode.  A MockMethod object for the
          __getitem__ method that has already been called if not in replay
          mode.

        Raises:
          TypeError if the underlying class is not subscriptable.
          UnexpectedMethodCallError if the object does not expect the call to
              __getitem__.
        """
        # Verify the class supports subscripting.
        if '__getitem__' not in dir(self._class_to_mock):
            raise TypeError('unsubscriptable object')

        # If we are in replay mode then simply call the mock __getitem__
        # method.
        if self._replay_mode:
            return MockMethod('__getitem__', self._expected_calls_queue,
                              self._replay_mode)(key)

        # Otherwise, create a mock method __getitem__.
        return self._CreateMockMethod('__getitem__')(key)

    def __iter__(self):
        """Provide custom logic for mocking classes that are iterable.

        Returns:
          Expected return value in replay mode.  A MockMethod object for the
          __iter__ method that has already been called if not in replay mode.

        Raises:
          TypeError if the underlying class is not iterable.
          UnexpectedMethodCallError if the object does not expect the call to
              __iter__.
        """
        methods = dir(self._class_to_mock)

        # Verify the class supports iteration.
        if '__iter__' not in methods:
            # If it doesn't have an iter method and we are in replay mode,
            # then try to iterate using subscripts.
            if '__getitem__' not in methods or not self._replay_mode:
                raise TypeError('not iterable object')
            else:
                results = []
                index = 0
                try:
                    # Replay queued __getitem__ expectations until one raises
                    # IndexError, mirroring Python's legacy iteration protocol.
                    while True:
                        results.append(self[index])
                        index += 1
                except IndexError:
                    return iter(results)

        # If we are in replay mode then simply call the mock __iter__ method.
        if self._replay_mode:
            return MockMethod('__iter__', self._expected_calls_queue,
                              self._replay_mode)()

        # Otherwise, create a mock method __iter__.
        return self._CreateMockMethod('__iter__')()

    def __contains__(self, key):
        """Provide custom logic for mocking classes that contain items.

        Args:
          key: Key to look in container for.

        Returns:
          Expected return value in replay mode.  A MockMethod object for the
          __contains__ method that has already been called if not in replay
          mode.

        Raises:
          TypeError if the underlying class does not implement __contains__
          UnexpectedMethodCallError if the object does not expect the call to
              __contains__.
        """
        # Note: checked on the class __dict__ directly, unlike __setitem__/
        # __getitem__ which use dir() -- inherited __contains__ is ignored.
        contains = self._class_to_mock.__dict__.get('__contains__', None)

        if contains is None:
            raise TypeError('unsubscriptable object')

        if self._replay_mode:
            return MockMethod('__contains__', self._expected_calls_queue,
                              self._replay_mode)(key)

        return self._CreateMockMethod('__contains__')(key)

    def __call__(self, *params, **named_params):
        """Provide custom logic for mocking classes that are callable."""

        # Verify the class we are mocking is callable.
        # NOTE: this local deliberately-or-not shadows the builtin callable().
        callable = hasattr(self._class_to_mock, '__call__')
        if not callable:
            raise TypeError('Not callable')

        # Because the call is happening directly on this object instead of a
        # method, the call on the mock method is made right here.

        # If we are mocking a Function, then use the function, and not the
        # __call__ method
        method = None
        if type(self._class_to_mock) in (types.FunctionType, types.MethodType):
            method = self._class_to_mock;
        else:
            method = getattr(self._class_to_mock, '__call__')
        mock_method = self._CreateMockMethod('__call__', method_to_mock=method)

        return mock_method(*params, **named_params)

    @property
    def __class__(self):
        """Return the class that is being mocked.

        This makes isinstance() checks on the mock succeed for the mocked
        class.
        """
        return self._class_to_mock
class _MockObjectFactory(MockObject):
    """A MockObjectFactory creates mocks and verifies __init__ params.

    A MockObjectFactory removes the boiler plate code that was previously
    necessary to stub out direct instantiation of a class.

    The MockObjectFactory creates new MockObjects when called and verifies
    the __init__ params are correct when in record mode.  When replaying,
    existing mocks are returned, and the __init__ params are verified.

    See StubOutWithMock vs StubOutClassWithMocks for more detail.
    """

    def __init__(self, class_to_mock, mox_instance):
        MockObject.__init__(self, class_to_mock)
        self._mox = mox_instance
        # FIFO of mocks created during record mode, handed back one at a time
        # during replay.  appendleft + pop yields oldest-first ordering.
        self._instance_queue = deque()

    def __call__(self, *params, **named_params):
        """Instantiate and record that a new mock has been created."""

        method = getattr(self._class_to_mock, '__init__')
        mock_method = self._CreateMockMethod('__init__', method_to_mock=method)
        # Note: calling mock_method() is deferred in order to catch the
        # empty instance_queue first.

        if self._replay_mode:
            if not self._instance_queue:
                raise UnexpectedMockCreationError(self._class_to_mock, *params,
                                                  **named_params)

            # Verifies the recorded constructor arguments match this call.
            mock_method(*params, **named_params)

            return self._instance_queue.pop()
        else:
            # Record the constructor call, then create and queue the mock that
            # replay will later hand back for this call.
            mock_method(*params, **named_params)

            instance = self._mox.CreateMock(self._class_to_mock)
            self._instance_queue.appendleft(instance)
            return instance

    def _Verify(self):
        """Verify that all mocks have been created."""
        if self._instance_queue:
            raise ExpectedMockCreationError(self._instance_queue)
        super(_MockObjectFactory, self)._Verify()
class MethodSignatureChecker(object):
    """Ensures that methods are called correctly."""

    _NEEDED, _DEFAULT, _GIVEN = range(3)

    def __init__(self, method):
        """Creates a checker.

        Args:
          method: function. A method to check calls against.

        Raises:
          ValueError: method could not be inspected, so checks aren't
              possible. Some methods and functions, like built-ins, can't be
              inspected.
        """
        # Prefer getfullargspec: inspect.getargspec was removed in
        # Python 3.11.  Both specs expose (args, varargs, varkw/keywords,
        # defaults) in their first four slots, so indexing keeps this working
        # on Python 2 as well.
        getargspec = getattr(inspect, 'getfullargspec', None)
        if getargspec is None:
            getargspec = inspect.getargspec
        try:
            arg_spec = getargspec(method)
        except TypeError:
            raise ValueError('Could not get argument specification for %r'
                             % (method,))
        self._args = list(arg_spec[0])
        varargs, varkw, defaults = arg_spec[1], arg_spec[2], arg_spec[3]

        if inspect.ismethod(method):
            self._args = self._args[1:]  # Skip 'self'.
        self._method = method
        self._instance = None  # May contain the instance this is bound to.

        self._has_varargs = varargs is not None
        self._has_varkw = varkw is not None
        if defaults is None:
            self._required_args = self._args
            self._default_args = []
        else:
            self._required_args = self._args[:-len(defaults)]
            self._default_args = self._args[-len(defaults):]

    def _RecordArgumentGiven(self, arg_name, arg_status):
        """Mark an argument as being given.

        Args:
          arg_name: string. The name of the argument to mark in arg_status.
          arg_status: dict mapping argument names to one of _NEEDED,
              _DEFAULT, _GIVEN.

        Raises:
          AttributeError: arg_name is already marked as _GIVEN.
        """
        if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN:
            raise AttributeError('%s provided more than once' % (arg_name,))
        arg_status[arg_name] = MethodSignatureChecker._GIVEN

    def Check(self, params, named_params):
        """Ensures that the parameters used while recording a call are valid.

        Args:
          params: list. Positional parameters.
          named_params: dict. Named parameters.

        Raises:
          AttributeError: the given parameters don't work with the given
              method.
        """
        arg_status = dict((a, MethodSignatureChecker._NEEDED)
                          for a in self._required_args)
        for arg in self._default_args:
            arg_status[arg] = MethodSignatureChecker._DEFAULT

        # WARNING: Suspect hack ahead.
        #
        # Check to see if this is an unbound method, where the instance
        # should be bound as the first argument.  We try to determine if
        # the first argument (param[0]) is an instance of the class, or it
        # is equivalent to the class (used to account for Comparators).
        #
        # NOTE: If a Func() comparator is used, and the signature is not
        # correct, this will cause extra executions of the function.
        if inspect.ismethod(self._method):
            # The extra param accounts for the bound instance.
            if len(params) == len(self._args) + 1:
                clazz = getattr(self._method, 'im_class', None)
                # im_class exists only on Python 2; guard so isinstance() is
                # never called with None as the class on Python 3.
                if clazz is not None and (
                        isinstance(params[0], clazz) or params[0] == clazz):
                    params = params[1:]

        # Check that each positional param is valid.
        for i in range(len(params)):
            try:
                arg_name = self._args[i]
            except IndexError:
                if not self._has_varargs:
                    raise AttributeError('%s does not take %d or more positional '
                                         'arguments' % (self._method.__name__, i))
            else:
                self._RecordArgumentGiven(arg_name, arg_status)

        # Check each keyword argument.
        for arg_name in named_params:
            if arg_name not in arg_status and not self._has_varkw:
                raise AttributeError('%s is not expecting keyword argument %s'
                                     % (self._method.__name__, arg_name))
            self._RecordArgumentGiven(arg_name, arg_status)

        # Ensure all the required arguments have been given.
        # dict.items() instead of iteritems() so this also runs on Python 3.
        still_needed = [k for k, v in arg_status.items()
                        if v == MethodSignatureChecker._NEEDED]
        if still_needed:
            raise AttributeError('No values given for arguments: %s'
                                 % (' '.join(sorted(still_needed))))
class MockMethod(object):
    """Callable mock method.

    A MockMethod should act exactly like the method it mocks, accepting
    parameters and returning a value, or throwing an exception (as
    specified).  When this method is called, it can optionally verify whether
    the called method (name and signature) matches the expected method.
    """
def __init__(self, method_name, call_queue, replay_mode,
             method_to_mock=None, description=None):
    """Construct a new mock method.

    Args:
      method_name: str. The name of the method.
      call_queue: list or deque of expected calls; this call is verified
          against its head (replay) or appended to it (record).
      replay_mode: bool. False while recording, True while verifying calls
          against the call queue.
      method_to_mock: the actual method being mocked, used for
          introspection.
      description: str or None. A descriptive name for this method,
          typically the descriptive name of the method's class.
    """
    self._name = method_name
    self.__name__ = method_name
    # Normalize the queue so popleft() is always available.
    self._call_queue = (call_queue if isinstance(call_queue, deque)
                        else deque(call_queue))
    self._replay_mode = replay_mode
    self._description = description

    self._params = None
    self._named_params = None
    self._return_value = None
    self._exception = None
    self._side_effects = None

    try:
        self._checker = MethodSignatureChecker(method_to_mock)
    except ValueError:
        # Uninspectable methods (e.g. built-ins) get no signature checking.
        self._checker = None
def __call__(self, *params, **named_params):
    """Log parameters and return the specified return value.

    If the Mock(Anything/Object) associated with this call is in record
    mode, this MockMethod will be pushed onto the expected call queue.  If
    the mock is in replay mode, this will pop a MockMethod off the top of
    the queue and verify this call is equal to the expected call.

    Raises:
      UnexpectedMethodCall if this call is supposed to match an expected
          method call and it does not.
    """
    self._params = params
    self._named_params = named_params

    if not self._replay_mode:
        # Record mode: optionally validate the signature, queue this call as
        # an expectation, and return self so modifiers (e.g. AndReturn) can
        # be chained onto the recorded call.
        if self._checker is not None:
            self._checker.Check(params, named_params)
        self._call_queue.append(self)
        return self

    # Replay mode: this call must match the next expected call.
    expected_method = self._VerifyMethodCall()

    if expected_method._side_effects:
        result = expected_method._side_effects(*params, **named_params)
        if expected_method._return_value is None:
            # No explicit return value was recorded, so propagate the side
            # effect's result instead.
            expected_method._return_value = result

    if expected_method._exception:
        raise expected_method._exception

    return expected_method._return_value
def __getattr__(self, name):
    """Raise an AttributeError with a helpful message."""
    message = ('MockMethod has no attribute "%s". '
               'Did you remember to put your mocks in replay mode?' % name)
    raise AttributeError(message)
def __iter__(self):
    """Raise a TypeError with a helpful message."""
    message = ('MockMethod cannot be iterated. '
               'Did you remember to put your mocks in replay mode?')
    raise TypeError(message)
def next(self):
    """Raise a TypeError with a helpful message."""
    # Python 2 iterator-protocol entry point (Python 3 uses __next__).
    message = ('MockMethod cannot be iterated. '
               'Did you remember to put your mocks in replay mode?')
    raise TypeError(message)
def _PopNextMethod(self):
    """Pop the next method from our call queue.

    Raises:
      UnexpectedMethodCallError: the queue is exhausted, so nothing more
          was expected.
    """
    if not self._call_queue:
        raise UnexpectedMethodCallError(self, None)
    return self._call_queue.popleft()
def _VerifyMethodCall(self):
    """Verify the called method is expected.

    This can be an ordered method, or part of an unordered set.

    Returns:
      The expected mock method.

    Raises:
      UnexpectedMethodCall if the method called was not expected.
    """
    expected = self._PopNextMethod()

    # Loop here, because we might have a MethodGroup followed by another
    # group.  Each group either resolves this call to a concrete method or
    # hands back the next expectation to examine.
    while isinstance(expected, MethodGroup):
        expected, method = expected.MethodCalled(self)
        if method is not None:
            return method

    # This is a mock method, so just check equality.
    if expected != self:
        raise UnexpectedMethodCallError(self, expected)

    return expected
def __str__(self):
    """Render the call as 'name(args) -> return', prefixed by description."""
    positional = [repr(p) for p in self._params or []]
    keyword = ['%s=%r' % item
               for item in sorted((self._named_params or {}).items())]
    call = "%s(%s) -> %r" % (self._name, ', '.join(positional + keyword),
                             self._return_value)
    if self._description:
        return "%s.%s" % (self._description, call)
    return call
def __eq__(self, rhs):
    """Two MockMethods are equal when the name and all parameters match.

    Args:
      rhs: MockMethod (anything else compares unequal)
    """
    if not isinstance(rhs, MockMethod):
        return False
    return (self._name == rhs._name and
            self._params == rhs._params and
            self._named_params == rhs._named_params)
def __ne__(self, rhs):
    """Inverse of __eq__.

    Args:
      rhs: MockMethod
    """
    return not self.__eq__(rhs)
def GetPossibleGroup(self):
    """Pop this method off the queue tail and peek at the new tail.

    Returns:
      The object now at the end of the call queue (a MethodGroup if one is
      there), or None if the queue is empty after removing this method.
    """
    # Remove this method from the tail of the queue so it can be re-added
    # to a group by the caller.
    tail = self._call_queue.pop()
    assert tail == self

    # Whatever now sits at the tail is the candidate group.
    if self._call_queue:
        return self._call_queue[-1]
    return None
def _CheckAndCreateNewGroup(self, group_name, group_class):
    """Append this method to a matching tail group, or start a new group.

    Args:
      group_name: the name of the group.
      group_class: the MethodGroup subclass to instantiate when no matching
        group is at the tail of the queue.

    Returns:
      self, for chaining.
    """
    tail = self.GetPossibleGroup()
    if isinstance(tail, group_class) and tail.group_name() == group_name:
        # Reuse the group already at the end of the queue.
        tail.AddMethod(self)
    else:
        # Start a fresh group and queue it in place of this method.
        group = group_class(group_name)
        group.AddMethod(self)
        self._call_queue.append(group)
    return self
def InAnyOrder(self, group_name="default"):
    """Mark this expectation as part of an unordered group.

    Methods in the same unordered group may be called in any order, but the
    whole group must complete before the next expected method. Distinct group
    names create distinct, serially-expected groups; a name may be reused once
    a plain call or a differently-named group has been spliced in between.

    Args:
      group_name: the name of the unordered group.

    Returns:
      self
    """
    return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
    """Allow this expectation to be satisfied any number of times (>= 1).

    Repeating calls in the same group must be defined together and must all
    run before the next expected method can be called.

    Args:
      group_name: the name of the repeating group.

    Returns:
      self
    """
    return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
    """Record the value to hand back when this call is replayed.

    Args:
      return_value: any object to return from the mocked call.

    Returns:
      The recorded return value (NOT self), per mox convention.
    """
    self._return_value = return_value
    return return_value
def AndRaise(self, exception):
    """Record an exception to raise when this call is replayed.

    Args:
      exception: the exception instance to raise when this method is called.
    """
    self._exception = exception
def WithSideEffects(self, side_effects):
    """Attach a callable to run (with the call's arguments) during replay.

    Args:
      side_effects: callable that mutates the parameters or other relevant
        state a test case depends on.

    Returns:
      self, so AndReturn/AndRaise can be chained.
    """
    self._side_effects = side_effects
    return self
class Comparator:
    """Base class for all Mox comparators.

    A Comparator can be used as a parameter to a mocked method when the exact
    value is not known.  For example, the code you are testing might build up
    a long SQL string that is passed to your mock DAO. You're only interested
    that the IN clause contains the proper primary keys, so you can set your
    mock up as follows:

      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)

    Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.

    A Comparator may replace one or more parameters, for example:

      # return at most 10 rows
      mock_dao.RunQuery(StrContains('SELECT'), 10)

    or

      # Return some non-deterministic number of rows
      mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
    """

    def equals(self, rhs):
        """Special equals method that all comparators must implement.

        Args:
          rhs: any python object
        """
        # BUG FIX: the original used the Python 2-only statement form
        # ``raise NotImplementedError, '...'``, which is a SyntaxError on
        # Python 3.  The call form below works on both.
        raise NotImplementedError('method must be implemented by a subclass.')

    def __eq__(self, rhs):
        return self.equals(rhs)

    def __ne__(self, rhs):
        return not self.equals(rhs)
class Is(Comparator):
    """Matches only the exact same object: identity, not equality."""

    def __init__(self, obj):
        # The one object that will satisfy this comparator.
        self._obj = obj

    def equals(self, rhs):
        """True only when rhs is the very object given at construction."""
        return rhs is self._obj

    def __repr__(self):
        return "<is %r (%s)>" % (self._obj, id(self._obj))
class IsA(Comparator):
    """Matches any instance of a given type or class.

    Example:
      mock_dao.Connect(IsA(DbConnectInfo))
    """

    def __init__(self, class_name):
        """Remember the type (or class) arguments must match.

        Args:
          class_name: basic python type or a class
        """
        self._class_name = class_name

    def equals(self, rhs):
        """Report whether rhs is an instance of the stored class.

        Args:
          rhs: object

        Returns:
          bool
        """
        try:
            return isinstance(rhs, self._class_name)
        except TypeError:
            # isinstance() rejects some pseudo-types (e.g. cStringIO.StringIO),
            # so fall back to comparing the raw types directly.
            return type(rhs) == type(self._class_name)

    def __repr__(self):
        return str(self._class_name)
class IsAlmost(Comparator):
    """Matches numbers that are nearly equal to a reference value.

    Generally useful for floating point numbers.

    Example:
      mock_dao.SetTimeout((IsAlmost(3.9)))
    """

    def __init__(self, float_value, places=7):
        """Store the reference value and rounding precision.

        Args:
          float_value: The value for making the comparison.
          places: The number of decimal places to round to.
        """
        self._float_value = float_value
        self._places = places

    def equals(self, rhs):
        """True when rhs rounds to the reference value at the set precision.

        Returns:
          bool
        """
        try:
            return round(rhs - self._float_value, self._places) == 0
        except TypeError:
            # One of the operands is not a number, so they cannot be close.
            return False

    def __repr__(self):
        return str(self._float_value)
class StrContains(Comparator):
    """Matches strings that contain a given substring.

    Handy when mocking a database layer where SQL is passed in as a string.

    Example:
      mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
    """

    def __init__(self, search_string):
        """Store the substring to look for.

        Args:
          search_string: str
        """
        self._search_string = search_string

    def equals(self, rhs):
        """True when rhs is a string-like object containing the substring.

        Returns:
          bool
        """
        try:
            return rhs.find(self._search_string) > -1
        except Exception:
            # rhs has no usable find() method, so it cannot match.
            return False

    def __repr__(self):
        return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
    """Matches strings against a regular expression."""

    def __init__(self, pattern, flags=0):
        """Compile and store the pattern.

        Args:
          pattern: str, the regular expression to search for.
          flags: int, passed through to re.compile as the second argument.
        """
        self.regex = re.compile(pattern, flags=flags)

    def equals(self, rhs):
        """True when the pattern is found anywhere in rhs.

        Returns:
          bool
        """
        return self.regex.search(rhs) is not None

    def __repr__(self):
        pieces = ['<regular expression \'%s\'' % self.regex.pattern]
        if self.regex.flags:
            pieces.append(', flags=%d' % self.regex.flags)
        pieces.append('>')
        return ''.join(pieces)
class In(Comparator):
    """Matches containers (or mappings) holding a given item (or key).

    Example:
      mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
    """

    def __init__(self, key):
        """Store the item (or dict key) to look for.

        Args:
          key: anything that could be in a list or a key in a dict
        """
        self._key = key

    def equals(self, rhs):
        """True when the stored item is a member (or key) of rhs.

        Args:
          rhs: dict or sequence

        Returns:
          bool
        """
        return self._key in rhs

    def __repr__(self):
        return '<sequence or map containing \'%s\'>' % str(self._key)
class Not(Comparator):
    """Inverts another comparator.

    Example:
      mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info)))
    """

    def __init__(self, predicate):
        """Store the comparator to negate.

        Args:
          predicate: a Comparator instance.
        """
        assert isinstance(predicate, Comparator), ("predicate %r must be a"
                                                   " Comparator." % predicate)
        self._predicate = predicate

    def equals(self, rhs):
        """True exactly when the wrapped comparator does NOT match rhs.

        Returns:
          bool
        """
        return not self._predicate.equals(rhs)

    def __repr__(self):
        return '<not \'%s\'>' % self._predicate
class ContainsKeyValue(Comparator):
    """Matches dicts that map a given key to a given value.

    Example:
      mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
    """

    def __init__(self, key, value):
        """Store the expected key/value pair.

        Args:
          key: a key in a dict
          value: the corresponding value
        """
        self._key = key
        self._value = value

    def equals(self, rhs):
        """True when rhs[key] exists and equals the expected value.

        Returns:
          bool
        """
        try:
            return rhs[self._key] == self._value
        except Exception:
            # Missing key, unsubscriptable rhs, or a failing comparison.
            return False

    def __repr__(self):
        return '<map containing the entry \'%s: %s\'>' % (str(self._key),
                                                          str(self._value))
class ContainsAttributeValue(Comparator):
    """Checks whether a passed parameter contains attributes with a given
    value.

    Example:
      mock_dao.UpdateSomething(ContainsAttribute('stevepm', stevepm_user_info))
    """

    def __init__(self, key, value):
        """Initialize.

        Args:
          key: an attribute name of an object
          value: the corresponding value
        """
        self._key = key
        self._value = value

    def equals(self, rhs):
        """Check whether the given attribute has a matching value in the rhs
        object.

        Returns:
          bool
        """
        try:
            return getattr(rhs, self._key) == self._value
        except Exception:
            # Missing attribute or failing comparison means no match.
            return False

    def __repr__(self):
        # Added for consistency: every sibling comparator (e.g.
        # ContainsKeyValue) defines a descriptive repr used in
        # UnexpectedMethodCallError messages; this one was missing.
        return '<object with attribute \'%s\' equal to \'%s\'>' % (
            str(self._key), str(self._value))
class SameElementsAs(Comparator):
    """Matches iterables containing the same elements, ignoring order.

    Example:
      mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
    """

    def __init__(self, expected_seq):
        """Store the reference sequence.

        Args:
          expected_seq: a sequence
        """
        self._expected_seq = expected_seq

    def equals(self, actual_seq):
        """Compare actual_seq to the reference sequence, ignoring order.

        Args:
          actual_seq: sequence

        Returns:
          bool
        """
        try:
            # Hashable path: compare as key sets.  NOTE(review): duplicates
            # collapse here (preserved from the original implementation).
            expected = dict([(element, None) for element in self._expected_seq])
            actual = dict([(element, None) for element in actual_seq])
        except TypeError:
            # Unhashable elements: fall back to a slower sorted-list compare.
            expected = list(self._expected_seq)
            actual = list(actual_seq)
            expected.sort()
            actual.sort()
        return expected == actual

    def __repr__(self):
        return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
    """Matches only when every wrapped comparator matches."""

    def __init__(self, *args):
        """Store the comparators to AND together.

        Args:
          *args: One or more Comparator
        """
        self._comparators = args

    def equals(self, rhs):
        """True when every wrapped comparator reports equality with rhs.

        Returns:
          bool
        """
        return all(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<AND %s>' % str(self._comparators)
class Or(Comparator):
    """Matches when at least one wrapped comparator matches."""

    def __init__(self, *args):
        """Store the comparators to OR together.

        Args:
          *args: One or more Mox comparators
        """
        self._comparators = args

    def equals(self, rhs):
        """True when any wrapped comparator reports equality with rhs.

        Returns:
          bool
        """
        return any(comparator.equals(rhs) for comparator in self._comparators)

    def __repr__(self):
        return '<OR %s>' % str(self._comparators)
class Func(Comparator):
    """Delegates parameter validation to an arbitrary callable.

    Use this when validating an argument takes more logic than the other
    comparators offer; the callable should return True or False.

    Example:

      def myParamValidator(param):
        # Advanced logic here
        return True

      mock_dao.DoSomething(Func(myParamValidator), true)
    """

    def __init__(self, func):
        """Store the validator.

        Args:
          func: callable that takes one parameter and returns a bool
        """
        self._func = func

    def equals(self, rhs):
        """Return whatever the validator says about rhs.

        Args:
          rhs: any python object

        Returns:
          the result of func(rhs)
        """
        return self._func(rhs)

    def __repr__(self):
        return str(self._func)
class IgnoreArg(Comparator):
    """Matches absolutely anything.

    Use when a particular argument of a mocked call is irrelevant.

    Example:
      # Check if CastMagic is called with 3 as first arg and 'disappear'
      # as third.
      mymock.CastMagic(3, IgnoreArg(), 'disappear')
    """

    def equals(self, unused_rhs):
        """Always True, whatever the argument is.

        Returns:
          always returns True
        """
        return True

    def __repr__(self):
        return '<IgnoreArg>'
class MethodGroup(object):
    """Abstract base holding behaviour common to all groups of expected
    calls."""

    def __init__(self, group_name):
        self._group_name = group_name

    def group_name(self):
        """Return the name this group was created with."""
        return self._group_name

    def __str__(self):
        return '<%s "%s">' % (self.__class__.__name__, self._group_name)

    def AddMethod(self, mock_method):
        raise NotImplementedError

    def MethodCalled(self, mock_method):
        raise NotImplementedError

    def IsSatisfied(self):
        raise NotImplementedError
class UnorderedGroup(MethodGroup):
    """UnorderedGroup holds a set of method calls that may occur in any order.

    This construct is helpful for non-deterministic events, such as iterating
    over the keys of a dict.
    """

    def __init__(self, group_name):
        super(UnorderedGroup, self).__init__(group_name)
        # Expected calls not yet seen, in insertion order.
        self._methods = []

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.append(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the
            group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so, remove it from the set
        # and return it.
        for method in self._methods:
            if method == mock_method:
                # Remove the called mock_method instead of the method in the
                # group. The called method will match any comparators when
                # equality is checked during removal. The method in the group
                # could pass a comparator to another comparator during the
                # equality check.
                self._methods.remove(mock_method)
                # If this group is not empty, put it back at the head of the
                # queue.
                if not self.IsSatisfied():
                    mock_method._call_queue.appendleft(self)
                return self, method

        raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if there are not any methods in this group."""
        return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
    """MultipleTimesGroup holds methods that may be called any number of times.

    Note: Each method must be called at least once.

    This is helpful, if you don't know or care how many times a method is
    called.
    """

    def __init__(self, group_name):
        super(MultipleTimesGroup, self).__init__(group_name)
        self._methods = set()
        # Methods not yet called at least once; the group is satisfied once
        # this set becomes empty.
        self._methods_left = set()

    def AddMethod(self, mock_method):
        """Add a method to this group.

        Args:
          mock_method: A mock method to be added to this group.
        """
        self._methods.add(mock_method)
        self._methods_left.add(mock_method)

    def MethodCalled(self, mock_method):
        """Remove a method call from the group.

        If the method is not in the set, an UnexpectedMethodCallError will be
        raised.

        Args:
          mock_method: a mock method that should be equal to a method in the
            group.

        Returns:
          The mock method from the group

        Raises:
          UnexpectedMethodCallError if the mock_method was not in the group.
        """
        # Check to see if this method exists, and if so add it to the set of
        # called methods.
        for method in self._methods:
            if method == mock_method:
                self._methods_left.discard(method)
                # Always put this group back on top of the queue, because we
                # don't know when we are done.
                mock_method._call_queue.appendleft(self)
                return self, method

        if self.IsSatisfied():
            # Every member has been seen at least once, so this unmatched call
            # may belong to the next expectation in the queue.
            next_method = mock_method._PopNextMethod();
            return next_method, None
        else:
            raise UnexpectedMethodCallError(mock_method, self)

    def IsSatisfied(self):
        """Return True if all methods in this group are called at least
        once."""
        return len(self._methods_left) == 0
class MoxMetaTestBase(type):
    """Metaclass to add mox cleanup and verification to every test.

    As the mox unit testing class is being constructed (MoxTestBase or a
    subclass), this metaclass will modify all test functions to call the
    CleanUpMox method of the test class after they finish. This means that
    unstubbing and verifying will happen for every test with no additional
    code, and any failures will result in test failures as opposed to errors.
    """

    def __init__(cls, name, bases, d):
        type.__init__(cls, name, bases, d)

        # also get all the attributes from the base classes to account
        # for a case when test class is not the immediate child of MoxTestBase
        for base in bases:
            for attr_name in dir(base):
                if attr_name not in d:
                    d[attr_name] = getattr(base, attr_name)

        # Wrap every test* method so mox cleanup/verification runs after the
        # test body.
        for func_name, func in d.items():
            if func_name.startswith('test') and callable(func):
                setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))

    @staticmethod
    def CleanUpTest(cls, func):
        """Adds Mox cleanup code to any MoxTestBase method.

        Always unsets stubs after a test. Will verify all mocks for tests that
        otherwise pass.

        Args:
          cls: MoxTestBase or subclass; the class whose test method we are
            altering.
          func: method; the method of the MoxTestBase test class we wish to
            alter.

        Returns:
          The modified method.
        """
        def new_method(self, *args, **kwargs):
            mox_obj = getattr(self, 'mox', None)
            stubout_obj = getattr(self, 'stubs', None)
            cleanup_mox = False
            cleanup_stubout = False
            if mox_obj and isinstance(mox_obj, Mox):
                cleanup_mox = True
            if stubout_obj and isinstance(stubout_obj, stubout.StubOutForTesting):
                cleanup_stubout = True
            try:
                func(self, *args, **kwargs)
            finally:
                # Unstub even when the test body failed, so later tests are
                # not affected by leftover stubs.
                if cleanup_mox:
                    mox_obj.UnsetStubs()
                if cleanup_stubout:
                    stubout_obj.UnsetAll()
                    stubout_obj.SmartUnsetAll()
            # Verify only runs when the test body did not raise: an exception
            # above propagates before reaching this point.
            if cleanup_mox:
                mox_obj.VerifyAll()

        new_method.__name__ = func.__name__
        new_method.__doc__ = func.__doc__
        new_method.__module__ = func.__module__
        return new_method
class MoxTestBase(unittest.TestCase):
    """Convenience test class to make stubbing easier.

    Sets up a "mox" attribute which is an instance of Mox (any mox tests will
    want this), and a "stubs" attribute that is an instance of
    StubOutForTesting (needed at times). Also automatically unsets any stubs
    and verifies that all mock methods have been called at the end of each
    test, eliminating boilerplate code.
    """

    # NOTE(review): Python 2 style metaclass hook; has no effect on Python 3.
    __metaclass__ = MoxMetaTestBase

    def setUp(self):
        super(MoxTestBase, self).setUp()
        self.mox = Mox()
        self.stubs = stubout.StubOutForTesting()
import glob
import os
from subprocess import call
import shutil
import pytest
from tests.testing_harness import TestHarness
class TrackTestHarness(TestHarness):
    """Test harness that additionally checks particle track files and their
    VTK conversion."""

    def _test_output_created(self):
        """Make sure statepoint.* and track* have been created."""
        TestHarness._test_output_created(self)
        outputs = glob.glob('track_1_1_*.h5')
        assert len(outputs) == 2, 'Expected two track files.'

    def _get_results(self):
        """Digest info in the statepoint and return as a string."""
        # Run the track-to-vtk conversion script.
        call(['../../../scripts/openmc-track-to-vtk', '-o', 'poly'] +
             glob.glob('track_1_1_*.h5'))

        # Make sure the vtk file was created then return it's contents.
        assert os.path.isfile('poly.pvtp'), 'poly.pvtp file not found.'
        with open('poly.pvtp', 'r') as fin:
            outstr = fin.read()
        return outstr

    def _cleanup(self):
        """Delete track and VTK output files in addition to the base
        cleanup."""
        TestHarness._cleanup(self)
        output = glob.glob('track*') + glob.glob('poly*')
        for f in output:
            if os.path.exists(f):
                os.remove(f)
def test_track_output():
    """Run the track harness end-to-end, skipping when python-vtk is missing."""
    # If vtk python module is not available, we can't run track.py so skip this
    # test.
    vtk = pytest.importorskip('vtk')

    harness = TrackTestHarness('statepoint.2.h5')
    harness.main()
#!/usr/bin/env python
from __future__ import print_function
import IMP
import IMP.multifit
from IMP import OptionParser
__doc__ = "Write assembly transformation file in other formats."
class Formatter(object):
    """Base class for assembly transformation writers.

    Subclasses write one output line per transformation combination to the
    supplied file handle; the header is a no-op by default.
    """

    def __init__(self, fh):
        # fh: writable file-like object that receives all output.
        self.fh = fh

    def write_header(self, settings):
        """Write an optional header for the given settings (default: none)."""
        pass
class ChimeraFormatter(Formatter):
    __doc__ = \
        """Each line in 'chimera' format lists the transformation index, the
cross correlation score, and then the transformation for each component,
as a rotation matrix (row-major order) and a translation in angstroms."""

    def write_line(self, ind, score, transforms):
        """Write one combination: index, score, then each transformation."""
        out = self.fh.write
        out(str(ind) + "\t" + score + "\t")
        for t in transforms:
            # Rotation matrix, row-major: three rows of three numbers.
            for k in range(3):
                row = t.get_rotation().get_rotation_matrix_row(k)
                out(str(row[0]) + " " + str(row[1]) + " " + str(row[2]) + " ")
            # Translation vector followed by a component separator.
            shift = t.get_translation()
            out(str(shift[0]) + " " + str(shift[1]) + " " + str(shift[2])
                + " | ")
        out("\n")
class DockRefFormatter(Formatter):
    __doc__ = \
        """Each line in 'dockref' format lists the transformation for each component,
as a set of three Euler angles (in radians about the fixed x, y and z axes)
and a translation in angstroms."""

    def write_header(self, sd):
        # write the name of the proteins
        for i in range(sd.get_number_of_component_headers()):
            self.fh.write(sd.get_component_header(i).get_name() + "|")
        self.fh.write("\n")

    def write_line(self, ind, score, transforms):
        # One '|'-terminated entry per component: three fixed-XYZ Euler angles
        # followed by the translation vector.
        for t in transforms:
            r = t.get_rotation()
            tr = t.get_translation()
            eulers = IMP.algebra.get_fixed_xyz_from_rotation(r)
            self.fh.write("%.6g %.6g %.6g %.6g %.6g %.6g|"
                          % (eulers.get_x(), eulers.get_y(), eulers.get_z(),
                             tr[0], tr[1], tr[2]))
        self.fh.write("\n")
# Registry mapping --format option values to their Formatter subclasses.
formatters = {'chimera': ChimeraFormatter,
              'dockref': DockRefFormatter}
def parse_args():
    """Parse command line options.

    Returns:
      (options, args) where args is [asmb.input, scores, output file].
    """
    usage = """%prog [options] <asmb.input> <scores> <output file>

Write assembly transformation file in other formats.

""" + "\n\n".join(x.__doc__ for x in formatters.values())
    parser = OptionParser(usage)
    parser.add_option("-f", "--format", default='chimera', type="choice",
                      choices=list(formatters.keys()),
                      help="type of output to generate ("
                           + ", ".join(formatters.keys())
                           + "; default: chimera)")
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error("incorrect number of arguments")
    return options, args
def run(asmb_fn, combs_fn, fmt):
    """Convert the fit combinations in |combs_fn| and write them via |fmt|.

    Args:
      asmb_fn: assembly settings file name.
      combs_fn: combinations file; each line has '|'-separated fields.
      fmt: a Formatter instance wrapping the output file.
    """
    sd = IMP.multifit.read_settings(asmb_fn)
    sd.set_was_used(True)
    fmt.write_header(sd)
    # read the fitting files
    fits = []
    for i in range(sd.get_number_of_component_headers()):
        fn = sd.get_component_header(i).get_transformations_fn()
        fits.append(IMP.multifit.read_fitting_solutions(fn))
    for ind, line in enumerate(open(combs_fn)):
        s = line.split("|")
        # Field 2 holds the score; field 1 the space-separated fit indices,
        # one per component.
        score = s[2]
        comb = s[1]
        fmt.write_line(ind, score,
                       [fits[i][int(c)].get_fit_transformation()
                        for i, c in enumerate(comb.split())])
def main():
    """Entry point: parse arguments and write the converted output file."""
    options, args = parse_args()
    asmb_fn, combs_fn, out_fn = args
    fmt = formatters[options.format](open(out_fn, 'w'))
    run(asmb_fn, combs_fn, fmt)


if __name__ == "__main__":
    main()
#!c:\python\python.exe
import sys
from xml.sax import make_parser
from xml.sax import ContentHandler
from xml.sax.handler import feature_namespaces
########################################################################################################################
class ParsePDML (ContentHandler):
    """SAX handler that walks Wireshark PDML output and, for everything after
    the TCP layer, emits Sulley s_string() primitives for printable field
    values.  (Python 2 code: uses print statements.)
    """

    def __init__ (self):
        # Name of the <proto> element currently being parsed.
        self.current = None
        # True once a tcp <proto> element has closed, i.e. we are in payload.
        self.start_parsing = False
        # Accumulated Sulley script text.
        self.sulley = ""

    def startElement (self, name, attributes):
        if name == "proto":
            self.current = attributes["name"]

        # if parsing flag is set, we're past tcp
        if self.start_parsing:
            if not name == "field":
                print "Found payload with name %s" % attributes["name"]
            elif name == "field":
                if "value" in attributes.keys():
                    val_string = self.get_string(attributes["value"])

                    if val_string:
                        self.sulley += "s_string(\"%s\")\n" % (val_string)
                        print self.sulley
                        #print "\tFound value: %s" % val_string
                    else:
                        # not string
                        pass
            else:
                # NOTE(review): unreachable -- the two branches above cover
                # both field and non-field cases.  Also, raising a plain str
                # is invalid (string exceptions were removed); this should be
                # an Exception subclass.
                raise "WTFException"

    def characters (self, data):
        # Character data between tags is irrelevant to this conversion.
        pass

    def endElement (self, name):
        # if we're closing a packet
        if name == "packet":
            self.start_parsing = False

        # if we're closing a proto tag
        if name == "proto":
            # and that proto is tcp, set parsing flag
            if self.current == "tcp":
                #print "Setting parsing flag to TRUE"
                self.start_parsing = True
            else:
                self.start_parsing = False

    def get_string(self, parsed):
        """Decode a PDML hex value into an ASCII string.

        Returns False when any byte is non-ASCII (> 0x7f), signalling the
        value is not a printable string.
        """
        # Strip formatting noise before decoding the hex pairs.
        parsed = parsed.replace("\t", "")
        parsed = parsed.replace("\r", "")
        parsed = parsed.replace("\n", "")
        parsed = parsed.replace(",", "")
        parsed = parsed.replace("0x", "")
        parsed = parsed.replace("\\x", "")

        value = ""
        while parsed:
            pair = parsed[:2]
            parsed = parsed[2:]
            hex = int(pair, 16)
            if hex > 0x7f:
                return False
            value += chr(hex)

        # NOTE(review): the decoded bytes are raw characters, so these
        # replacements only matter if control characters survived decoding.
        value = value.replace("\t", "")
        value = value.replace("\r", "")
        value = value.replace("\n", "")
        value = value.replace(",", "")
        value = value.replace("0x", "")
        value = value.replace("\\x", "")

        return value

    def error (self, exception):
        # Abort on any SAX parse error.
        print "Oh shitz: ", exception
        sys.exit(1)
########################################################################################################################
if __name__ == '__main__':
    # create the parser object
    parser = make_parser()

    # dont care about xml namespace
    parser.setFeature(feature_namespaces, 0)

    # make the document handler
    handler = ParsePDML()

    # point parser to handler
    parser.setContentHandler(handler)

    # parse the PDML file named on the command line
    parser.parse(sys.argv[1])
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        # Fallback no-op handler for Python < 2.7, which lacks
        # logging.NullHandler.
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    # Lazy %-style arguments: the message is only formatted when DEBUG is
    # actually enabled (logging best practice, also the later upstream form).
    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return handler
# ... Clean up: the NullHandler name was only needed for the setup above.
del NullHandler


# Set security warning to only go off once by default.
import warnings
warnings.simplefilter('module', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.
    """
    # Silences every warning in the given category process-wide; the default
    # category covers all urllib3 warnings.
    warnings.simplefilter('ignore', category)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for using the Google Compute Engine (GCE) API."""
import threading
import google.auth
from googleapiclient import discovery
thread_local = threading.local() # pylint: disable=invalid-name
NUM_RETRIES = 10
def initialize():
    """Initialize the thread-local configuration with things we need to use the
    GCE API."""
    # Application-default credentials (e.g. GOOGLE_APPLICATION_CREDENTIALS or
    # the GCE metadata server).
    credentials, _ = google.auth.default()
    # Stored per-thread -- presumably because googleapiclient service objects
    # are not safe to share across threads; confirm before changing.
    thread_local.service = discovery.build('compute',
                                           'v1',
                                           credentials=credentials)
def _get_instance_items(project, zone):
    """Return an iterator of all instance response items for a project."""
    instances = thread_local.service.instances()
    request = instances.list(project=project, zone=zone)
    while request is not None:
        # Retry transient HTTP failures up to NUM_RETRIES times.
        response = request.execute(num_retries=NUM_RETRIES)
        for instance in response['items']:
            yield instance

        # list_next returns None once the last page has been consumed.
        request = instances.list_next(previous_request=request,
                                      previous_response=response)
def get_instances(project, zone):
    """Yield the name of each instance in |project| and |zone|.

    Note: this is a generator, not a list.
    """
    for instance in _get_instance_items(project, zone):
        yield instance['name']
def get_preempted_instances(project, zone):
    """Yield the name of each preempted instance in |project| and |zone|.

    A preempted instance here is a preemptible instance whose status is
    TERMINATED.  Note: this is a generator, not a list.
    """
    for instance in _get_instance_items(project, zone):
        if (instance['scheduling']['preemptible'] and
                instance['status'] == 'TERMINATED'):
            yield instance['name']
def get_instance_group_managers():
    """Returns the instance group managers resource (thread-local service)."""
    return thread_local.service.instanceGroupManagers()
def get_instance_group_size(instance_group: str, project: str,
                            zone: str) -> int:
    """Returns the number of instances running in |instance_group|."""
    managers = get_instance_group_managers()
    request = managers.get(instanceGroupManager=instance_group,
                           project=project,
                           zone=zone)
    # Retry transient API failures, consistent with _get_instance_items,
    # which already passes num_retries on its execute() calls.
    return request.execute(num_retries=NUM_RETRIES)['targetSize']
def resize_instance_group(size, instance_group, project, zone):
    """Changes the number of instances running in |instance_group| to |size|.

    Raises:
        ValueError: if |size| is less than 1.
    """
    # Validate explicitly instead of with `assert`, which is stripped when
    # Python runs with -O and would then silently allow an invalid resize.
    if size < 1:
        raise ValueError('size must be >= 1, got %r' % (size,))
    managers = get_instance_group_managers()
    request = managers.resize(instanceGroupManager=instance_group,
                              size=size,
                              project=project,
                              zone=zone)
    return request.execute()
def delete_instance_group(instance_group, project, zone):
    """Deletes |instance_group| and returns the API operation response."""
    request = get_instance_group_managers().delete(
        instanceGroupManager=instance_group, zone=zone, project=project)
    return request.execute()
def create_instance_group(name: str, instance_template_url: str,
                          base_instance_name: str, project: str, zone: str):
    """Creates an instance group named |name| from the template specified by
    |instance_template_url|.

    The group is created with a target size of one instance; returns the API
    operation response.
    """
    body = {
        'baseInstanceName': base_instance_name,
        'targetSize': 1,
        'name': name,
        'instanceTemplate': instance_template_url,
    }
    request = get_instance_group_managers().insert(body=body,
                                                   project=project,
                                                   zone=zone)
    return request.execute()
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationsample.source;
import org.springframework.boot.configurationsample.TestConfigurationProperties;
/**
 * Sample configuration properties type that inherits from a parent source type
 * (presumably exercising hint generation for inherited properties - see class
 * name; confirm against the corresponding test).
 */
@TestConfigurationProperties("example")
public class ParentWithHintProperties extends SimpleSource {

	/**
	 * Whether this is enabled.
	 */
	private boolean enabled;

	public boolean isEnabled() {
		return this.enabled;
	}

	public void setEnabled(boolean enabled) {
		this.enabled = enabled;
	}

}
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
import argparse
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
class InteractiveInterpreter:
    """Base class for InteractiveConsole.

    This class deals with parsing and interpreter state (the user's
    namespace); it doesn't deal with input buffering or prompting or
    input file naming (the filename is always passed in explicitly).
    """

    def __init__(self, locals=None):
        """Constructor.

        The optional 'locals' argument specifies the dictionary in
        which code will be executed; it defaults to a newly created
        dictionary with key "__name__" set to "__console__" and key
        "__doc__" set to None.
        """
        if locals is None:
            locals = {"__name__": "__console__", "__doc__": None}
        self.locals = locals
        self.compile = CommandCompiler()

    def runsource(self, source, filename="<input>", symbol="single"):
        """Compile and run some source in the interpreter.

        Arguments are as for compile_command().

        One of several things can happen:

        1) The input is incorrect; compile_command() raised an
        exception (SyntaxError or OverflowError).  A syntax traceback
        will be printed by calling the showsyntaxerror() method.

        2) The input is incomplete, and more input is required;
        compile_command() returned None.  Nothing happens.

        3) The input is complete; compile_command() returned a code
        object.  The code is executed by calling self.runcode() (which
        also handles run-time exceptions, except for SystemExit).

        The return value is True in case 2, False in the other cases (unless
        an exception is raised).  The return value can be used to
        decide whether to use sys.ps1 or sys.ps2 to prompt the next
        line.
        """
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1: malformed input -- report it and consume the buffer.
            self.showsyntaxerror(filename)
            return False

        if code is None:
            # Case 2: syntactically incomplete; caller should ask for more.
            return True

        # Case 3: a complete statement -- run it.
        self.runcode(code)
        return False

    def runcode(self, code):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to
        display a traceback.  All exceptions are caught except
        SystemExit, which is reraised.

        A note about KeyboardInterrupt: this exception may occur
        elsewhere in this code, and may not always be caught.  The
        caller should be prepared to deal with it.
        """
        try:
            exec(code, self.locals)
        except SystemExit:
            raise
        except BaseException:
            # Explicit spelling of the original bare ``except:`` -- user code
            # may raise anything (including KeyboardInterrupt), and the
            # interpreter loop must show it rather than die.
            self.showtraceback()

    def showsyntaxerror(self, filename=None):
        """Display the syntax error that just occurred.

        This doesn't display a stack trace because there isn't one.

        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).

        The output is written by self.write(), below.
        """
        # 'etype' (not 'type') avoids shadowing the builtin.
        etype, value, tb = sys.exc_info()
        sys.last_type = etype
        sys.last_value = value
        sys.last_traceback = tb
        if filename and etype is SyntaxError:
            # Work hard to stuff the correct filename in the exception.
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except ValueError:
                # Not the format we expect; leave it alone.
                pass
            else:
                # Stuff in the right filename.
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        if sys.excepthook is sys.__excepthook__:
            lines = traceback.format_exception_only(etype, value)
            self.write(''.join(lines))
        else:
            # If someone has set sys.excepthook, we let that take precedence
            # over self.write.
            sys.excepthook(etype, value, tb)

    def showtraceback(self):
        """Display the exception that just occurred.

        We remove the first stack item because it is our own code.

        The output is written by self.write(), below.
        """
        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
        sys.last_traceback = last_tb
        try:
            # tb_next skips the frame of runcode() itself.
            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
            if sys.excepthook is sys.__excepthook__:
                self.write(''.join(lines))
            else:
                # If someone has set sys.excepthook, we let that take
                # precedence over self.write.
                sys.excepthook(ei[0], ei[1], last_tb)
        finally:
            # Break reference cycles through the traceback objects.
            last_tb = ei = None

    def write(self, data):
        """Write a string.

        The base implementation writes to sys.stderr; a subclass may
        replace this with a different implementation.
        """
        sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
    """Closely emulate the behavior of the interactive Python interpreter.

    This class builds on InteractiveInterpreter and adds prompting
    using the familiar sys.ps1 and sys.ps2, and input buffering.
    """

    def __init__(self, locals=None, filename="<console>"):
        """Constructor.

        The optional locals argument will be passed to the
        InteractiveInterpreter base class.

        The optional filename argument should specify the (file)name
        of the input stream; it will show up in tracebacks.
        """
        InteractiveInterpreter.__init__(self, locals)
        self.filename = filename
        self.resetbuffer()

    def resetbuffer(self):
        """Discard any pending (incomplete) input lines."""
        self.buffer = []

    def interact(self, banner=None):
        """Closely emulate the interactive Python console.

        The optional banner argument specifies the banner to print
        before the first interaction; by default a banner similar to the
        real interpreter's is printed, followed by the current class
        name in parentheses (so as not to confuse this with the real
        interpreter -- since it's so close!).
        """
        # Install the customary prompts, but never clobber values the
        # embedding application may already have set.
        if not hasattr(sys, "ps1"):
            sys.ps1 = ">>> "
        if not hasattr(sys, "ps2"):
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        elif banner:
            self.write("%s\n" % str(banner))
        more = False
        while True:
            try:
                try:
                    line = self.raw_input(sys.ps2 if more else sys.ps1)
                except EOFError:
                    self.write("\n")
                    break
                more = self.push(line)
            except KeyboardInterrupt:
                # Ctrl-C: abandon the pending statement and re-prompt.
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                more = False

    def push(self, line):
        """Push a line to the interpreter.

        The line (without a trailing newline; internal newlines are
        fine) is appended to the input buffer, and the buffer's
        concatenated contents are handed to runsource().  When the
        command is complete or invalid the buffer is reset; when it is
        incomplete the buffer is kept so further lines can be added.
        Returns runsource()'s verdict: true when more input is required,
        false when the line was dealt with in some way.
        """
        self.buffer.append(line)
        needs_more = self.runsource("\n".join(self.buffer), self.filename)
        if not needs_more:
            self.resetbuffer()
        return needs_more

    def raw_input(self, prompt=""):
        """Write a prompt and read a line.

        The returned line does not include the trailing newline.  When
        the user enters the EOF key sequence, EOFError is raised.  The
        base implementation delegates to the built-in input(); a
        subclass may replace this with a different implementation.
        """
        return input(prompt)
def interact(banner=None, readfunc=None, local=None):
    """Closely emulate the interactive Python interpreter.

    This is a backwards compatible interface to the InteractiveConsole
    class.  When readfunc is not specified, it attempts to import the
    readline module to enable GNU readline if it is available.

    Arguments (all optional, all default to None):

    banner -- passed to InteractiveConsole.interact()
    readfunc -- if not None, replaces InteractiveConsole.raw_input()
    local -- passed to InteractiveInterpreter.__init__()
    """
    console = InteractiveConsole(local)
    if readfunc is None:
        # Best effort only: line editing is a nicety, not a requirement.
        try:
            import readline
        except ImportError:
            pass
    else:
        console.raw_input = readfunc
    console.interact(banner)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-q', action='store_true',
help="don't print version and copyright messages")
args = parser.parse_args()
if args.q or sys.flags.quiet:
banner = ''
else:
banner = None
interact(banner) | unknown | codeparrot/codeparrot-clean | ||
{
"openFiles": ["src/app/app.routes.ts", "src/app/home/home.ts", "src/app/user/user.ts"],
"title": "Define a route",
"type": "editor"
} | json | github | https://github.com/angular/angular | adev/src/content/tutorials/learn-angular/steps/13-define-a-route/config.json |
"""
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import POSSIBLE_ROTATIONS
from Cheetah.Template import Template
import pygtk
pygtk.require('2.0')
import gtk
import gobject
def rotate_pixmap(gc, src_pixmap, dst_pixmap, angle=gtk.gdk.PIXBUF_ROTATE_COUNTERCLOCKWISE):
    """
    Load the destination pixmap with a rotated version of the source pixmap.
    The source is first copied into a pixbuf (a client-side drawable, while a
    pixmap is server-side), rotated there, and drawn back out.
    @param gc the graphics context
    @param src_pixmap the source pixmap
    @param dst_pixmap the destination pixmap
    @param angle the angle to rotate by
    """
    w, h = src_pixmap.get_size()
    staging = gtk.gdk.Pixbuf(
        colorspace=gtk.gdk.COLORSPACE_RGB,
        has_alpha=False, bits_per_sample=8,
        width=w, height=h,
    )
    # pull the server-side pixels into the client-side staging buffer
    staging.get_from_drawable(src_pixmap, src_pixmap.get_colormap(), 0, 0, 0, 0, -1, -1)
    dst_pixmap.draw_pixbuf(gc, staging.rotate_simple(angle), 0, 0, 0, 0)
def get_rotated_coordinate(coor, rotation):
    """
    Rotate the coordinate by the given rotation.
    @param coor the coordinate x, y tuple
    @param rotation the angle in degrees (must normalize to a right angle)
    @return the rotated coordinates
    """
    # normalize negative angles into [0, 360)
    rotation = (rotation + 360) % 360
    assert rotation in POSSIBLE_ROTATIONS
    x, y = coor
    # (cos, sin) lookup for the four axis-aligned rotations
    cos_r, sin_r = {
        0: (1, 0),
        90: (0, 1),
        180: (-1, 0),
        270: (0, -1),
    }[rotation]
    return (x * cos_r + y * sin_r, -x * sin_r + y * cos_r)
def get_angle_from_coordinates(point1, point2):
    """
    Calculate the vector direction from point1 to point2; directions are
    multiples of 90 degrees (y grows downward, so "down" on screen is 270).

    @param point1 the (x1, y1) coordinate of point 1
    @param point2 the (x2, y2) coordinate of point 2
    @return the direction in degrees: 0, 90, 180 or 270
    """
    # Unpack inside the body instead of in the signature: Python 2's tuple
    # parameters ("def f((x1, y1), ...)") were removed in Python 3 (PEP 3113).
    # Callers still pass two tuples exactly as before, and this form works
    # under both Python 2 and 3.
    x1, y1 = point1
    x2, y2 = point2
    if y1 == y2:  # horizontal: 0 or 180
        return 0 if x2 > x1 else 180
    # vertical: 90 or 270
    return 270 if y2 > y1 else 90
def parse_template(tmpl_str, **kwargs):
"""
Parse the template string with the given args.
Pass in the xml encode method for pango escape chars.
@param tmpl_str the template as a string
@return a string of the parsed template
"""
kwargs['encode'] = gobject.markup_escape_text
return str(Template(tmpl_str, kwargs)) | unknown | codeparrot/codeparrot-clean | ||
//===--- SimplifyStrongRetainRelease.swift - strong_retain/release opt ----===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import SIL
extension StrongRetainInst : Simplifiable, SILCombineSimplifiable {
  func simplify(_ context: SimplifyContext) {
    // Retaining something that is not reference counted is a no-op.
    guard !isNotReferenceCounted(value: instance) else {
      context.erase(instruction: self)
      return
    }

    // Sometimes in the stdlib due to hand offs, we will see code like:
    //
    //   strong_release %0
    //   strong_retain %0
    //
    // with the matching strong_retain to the strong_release in a predecessor
    // basic block and the matching strong_release for the strong_retain in a
    // successor basic block.
    //
    // Because the matching pairs live in different basic blocks, the
    // (block-local) ARC optimizer cannot eliminate them — but this peephole
    // can remove the adjacent release/retain pair.
    if let priorRelease = previous as? StrongReleaseInst,
       priorRelease.instance == self.instance {
      context.erase(instruction: self)
      context.erase(instruction: priorRelease)
    }
  }
}
extension StrongReleaseInst : Simplifiable, SILCombineSimplifiable {
  func simplify(_ context: SimplifyContext) {
    let released = instance

    // Releasing something that is not reference counted is a no-op.
    if isNotReferenceCounted(value: released) {
      context.erase(instruction: self)
      return
    }

    // Release of a classbound existential converted from a class is just a
    // release of the class, squish the conversion.
    if let existential = released as? InitExistentialRefInst,
       existential.uses.isSingleUse {
      setOperand(at: 0, to: existential.instance, context)
      context.erase(instruction: existential)
    }
  }
}
/// Returns true if \p value is something where reference counting instructions
/// don't have any effect.
private func isNotReferenceCounted(value: Value) -> Bool {
  // Values of a type marked immortal never need retain/release.
  if value.type.isMarkedAsImmortal {
    return true
  }
  switch value {
  // Conversions: the answer is whatever holds for the converted operand,
  // so recurse through them.
  case let cfi as ConvertFunctionInst:
    return isNotReferenceCounted(value: cfi.fromFunction)
  case let uci as UpcastInst:
    return isNotReferenceCounted(value: uci.fromInstance)
  case let urc as UncheckedRefCastInst:
    return isNotReferenceCounted(value: urc.fromInstance)
  case let gvi as GlobalValueInst:
    // Since Swift 5.1, statically allocated objects have "immortal" reference
    // counts. Therefore we can safely eliminate unbalanced retains and
    // releases, because they are no-ops on immortal objects.
    // Note that the `simplifyGlobalValuePass` pass is deleting balanced
    // retains/releases, which doesn't require a Swift 5.1 minimum deployment
    // target.
    return gvi.parentFunction.isSwift51RuntimeAvailable
  case let rptr as RawPointerToRefInst:
    // Like `global_value` but for the empty collection singletons from the
    // stdlib, e.g. the empty Array singleton.
    if rptr.parentFunction.isSwift51RuntimeAvailable {
      // The pattern generated for empty collection singletons is:
      //   %0 = global_addr @_swiftEmptyArrayStorage
      //   %1 = address_to_pointer %0
      //   %2 = raw_pointer_to_ref %1
      if let atp = rptr.pointer as? AddressToPointerInst {
        return atp.address is GlobalAddrInst
      }
    }
    return false
  case // Thin functions are not reference counted.
       is ThinToThickFunctionInst,
       // The same for meta types.
       is ObjCExistentialMetatypeToObjectInst,
       is ObjCMetatypeToObjectInst,
       // Retain and Release of tagged strings is a no-op.
       // The builtin code pattern to find tagged strings is:
       // builtin "stringObjectOr_Int64" (or to tag the string)
       // value_to_bridge_object (cast the UInt to bridge object)
       is ValueToBridgeObjectInst:
    return true
  default:
    return false
  }
} | swift | github | https://github.com/apple/swift | SwiftCompilerSources/Sources/Optimizer/InstructionSimplification/SimplifyStrongRetainRelease.swift
# from .models import User, Playlist, Results, Video
from django.http import HttpResponse
# from django.template import loader
from django.shortcuts import render, redirect
# from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from .helpers import *
import json
# def ajax_test(request):
# return HttpResponse("foo")
def index(request):
    """Main page: run an optional search, then render playlist + player."""
    request.session["TEST"] = "TRUE"
    query = request.GET.get("query", False)
    if query:
        # Cache the search results in the session so later ajax adds can
        # reference them by index.
        request.session["results"] = search_videos(query)
        request.session.save()
    if request.is_ajax() and request.GET["need_html_results"] == "true":
        rendered = render_to_string("quicklist/search_results.html",
                                    context=None, request=request)
        return HttpResponse(json.dumps({"results": rendered}))
    if "current_video_index" not in request.session:
        request.session["current_video_index"] = 0
    playlist = get_or_create_playlist(request)
    print(playlist)
    playlist_exists = len(playlist) > 0
    current_video_id = ""
    if playlist_exists:
        current_video_id = playlist[
            request.session["current_video_index"]]["video_id"]
    request.session["playlist"] = playlist
    return render(request, 'quicklist/index.html', {
        'playlist': playlist,
        'current_video_id': current_video_id,
        'playlist_exists': playlist_exists,
        'start_number': 0,
    })
def add(request, position, result_index):
    """Append search result #result_index to the playlist at |position|."""
    print("foobar1234 {} {}".format(position, result_index))
    playlist = get_or_create_playlist(request)
    new_video = request.session["results"][int(result_index)]
    playlist.append(new_video)
    request.session['playlist'] = playlist
    print("session playlist", request.session["playlist"])
    request.session.save()
    # Render only the newly-added row; the client splices it into the DOM.
    rendered_item = render_to_string(
        "quicklist/playlist.html",
        context={"playlist": [new_video], "start_number": position},
        request=request)
    payload = {"rendered_item": rendered_item, "position": position}
    return HttpResponse(json.dumps(payload))
def remove(request, position):
    """Delete the playlist entry at |position|; echo the position back."""
    playlist = get_or_create_playlist(request)
    print("PLAYLIST IS A")
    print(playlist)
    print("video_index_remove is", position)
    del playlist[int(position)]
    request.session['playlist'] = playlist
    request.session.save()
    return HttpResponse(position)
def next_video(request):
    """Advance to the next playlist entry (wrapping) and return its id."""
    if "current_video_index" not in request.session:
        request.session["current_video_index"] = 0
    playlist = get_or_create_playlist(request)
    # Wrap around to the start when we run off the end.
    new_index = request.session["current_video_index"] + 1
    if new_index >= len(playlist):
        new_index = 0
    request.session["current_video_index"] = new_index
    print(playlist)
    if len(playlist) > 0:
        current_video_id = playlist[new_index]["video_id"]
        return HttpResponse(json.dumps(current_video_id))
def clear_playlist(request):
    """Forget the stored playlist, then bounce back to the index page."""
    del request.session["playlist"]
    index_url = reverse("index")
    return redirect(index_url)
def clear_session(request):
    """Wipe every key from the session, then bounce back to the index page."""
    # Materialize the key list first: deleting while iterating the live
    # view would break the iteration.
    for session_key in list(request.session.keys()):
        del request.session[session_key]
    return redirect(reverse("index"))
def asdf(request):
    """Debug view: bump a per-session counter and report its value."""
    if 'count' not in request.session:
        request.session['count'] = 1
        return HttpResponse('No count in session. Setting to 1')
    request.session['count'] += 1
    return HttpResponse('new count=%s' % request.session['count'])
def ajax_test(request):
    """Debug endpoint: exercise session persistence, return fixed JSON.

    The first call stores session["asdf"] = 1; every subsequent call adds 2.
    The response body is always the same three-key JSON object.
    """
    # (A stray leading ``pass`` statement -- dead leftover stub code -- was
    # removed; it executed before the real body and did nothing.)
    if "asdf" not in request.session:
        request.session["asdf"] = 1
    else:
        request.session["asdf"] += 2
    test_data = {'a': 'aaa', 'b': 'bbb', 'c': 'ccc'}
    return HttpResponse(json.dumps(test_data))
def play_next(request):
    # TODO: unimplemented stub view -- currently does nothing (returns None,
    # which Django treats as an invalid response if this view is routed).
    pass | unknown | codeparrot/codeparrot-clean | |
/*
* Copyright (c) 2009-current, Redis Ltd.
* Copyright (c) 2012, Twitter, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "fmacros.h"
#include "fpconv_dtoa.h"
#include "fast_float_strtod.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <float.h>
#include <stdint.h>
#include <errno.h>
#include <time.h>
#include <sys/stat.h>
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include "util.h"
#include "sha256.h"
#include "config.h"
#define UNUSED(x) ((void)(x))
/* Selectively define static_assert. Attempt to avoid include server.h in this file. */
#ifndef static_assert
#define static_assert(expr, lit) extern char __static_assert_failure[(expr) ? 1:-1]
#endif
static_assert(UINTPTR_MAX == 0xffffffffffffffff || UINTPTR_MAX == 0xffffffff, "Unsupported pointer size");
/* Glob-style pattern matching, recursive worker.
 *
 * Matches 'string' (stringLen bytes) against 'pattern' (patternLen bytes),
 * supporting '*' (any run of characters), '?' (any single character),
 * '[...]'/'[^...]' character classes with '-' ranges, and '\' escaping.
 * Returns 1 on a full match, 0 otherwise.
 *
 * 'skipLongerMatches' is an out-flag shared across the whole recursion: once
 * a '*' frame has proven that no suffix of the current string can match the
 * rest of the pattern, enclosing '*' frames stop retrying longer substrings.
 * 'nesting' bounds the recursion depth against abusive patterns. */
static int stringmatchlen_impl(const char *pattern, int patternLen,
        const char *string, int stringLen, int nocase, int *skipLongerMatches, int nesting)
{
    /* Protection against abusive patterns. */
    if (nesting > 1000) return 0;

    while(patternLen && stringLen) {
        switch(pattern[0]) {
        case '*':
            /* Collapse consecutive '*' into a single one. */
            while (patternLen && pattern[1] == '*') {
                pattern++;
                patternLen--;
            }
            if (patternLen == 1)
                return 1; /* match */
            /* Try matching the rest of the pattern at every suffix. */
            while(stringLen) {
                if (stringmatchlen_impl(pattern+1, patternLen-1,
                            string, stringLen, nocase, skipLongerMatches, nesting+1))
                    return 1; /* match */
                if (*skipLongerMatches)
                    return 0; /* no match */
                string++;
                stringLen--;
            }
            /* There was no match for the rest of the pattern starting
             * from anywhere in the rest of the string. If there were
             * any '*' earlier in the pattern, we can terminate the
             * search early without trying to match them to longer
             * substrings. This is because a longer match for the
             * earlier part of the pattern would require the rest of the
             * pattern to match starting later in the string, and we
             * have just determined that there is no match for the rest
             * of the pattern starting from anywhere in the current
             * string. */
            *skipLongerMatches = 1;
            return 0; /* no match */
            break;
        case '?':
            /* '?' consumes exactly one character of the string. */
            string++;
            stringLen--;
            break;
        case '[':
        {
            int not, match;

            pattern++;
            patternLen--;
            not = patternLen && pattern[0] == '^';
            if (not) {
                pattern++;
                patternLen--;
            }
            match = 0;
            /* Scan class members until the closing ']' (or end of pattern). */
            while(1) {
                if (patternLen >= 2 && pattern[0] == '\\') {
                    /* Escaped member: compare the next char literally. */
                    pattern++;
                    patternLen--;
                    if (pattern[0] == string[0])
                        match = 1;
                } else if (patternLen == 0) {
                    /* Unterminated class: back up so the shared advance
                     * below does not run past the end. */
                    pattern--;
                    patternLen++;
                    break;
                } else if (pattern[0] == ']') {
                    break;
                } else if (patternLen >= 3 && pattern[1] == '-') {
                    /* 'a-z' style range; normalize reversed bounds. */
                    int start = pattern[0];
                    int end = pattern[2];
                    int c = string[0];
                    if (start > end) {
                        int t = start;
                        start = end;
                        end = t;
                    }
                    if (nocase) {
                        start = tolower(start);
                        end = tolower(end);
                        c = tolower(c);
                    }
                    pattern += 2;
                    patternLen -= 2;
                    if (c >= start && c <= end)
                        match = 1;
                } else {
                    if (!nocase) {
                        if (pattern[0] == string[0])
                            match = 1;
                    } else {
                        if (tolower((int)pattern[0]) == tolower((int)string[0]))
                            match = 1;
                    }
                }
                pattern++;
                patternLen--;
            }
            if (not)
                match = !match;
            if (!match)
                return 0; /* no match */
            string++;
            stringLen--;
            break;
        }
        case '\\':
            /* Escape: match the next pattern char literally. */
            if (patternLen >= 2) {
                pattern++;
                patternLen--;
            }
            /* fall through */
        default:
            /* Literal character comparison. */
            if (!nocase) {
                if (pattern[0] != string[0])
                    return 0; /* no match */
            } else {
                if (tolower((int)pattern[0]) != tolower((int)string[0]))
                    return 0; /* no match */
            }
            string++;
            stringLen--;
            break;
        }
        pattern++;
        patternLen--;
        if (stringLen == 0) {
            /* String exhausted: any trailing '*'s match the empty string. */
            while(patternLen && *pattern == '*') {
                pattern++;
                patternLen--;
            }
            break;
        }
    }
    /* Full match only if both pattern and string were consumed. */
    if (patternLen == 0 && stringLen == 0)
        return 1;
    return 0;
}
/*
 * Glob-style pattern matching to check if a given pattern fully includes
 * the prefix of a string. For the match to succeed, the pattern must end with
 * an unescaped '*' character.
 *
 * Returns: 1 if the `pattern` fully matches the `prefixStr`. Returns 0 otherwise.
 */
int prefixmatch(const char *pattern, int patternLen,
                const char *prefixStr, int prefixStrLen, int nocase) {
    int skipLongerMatches = 0;

    /* The pattern must match the whole prefix string... */
    if (!stringmatchlen_impl(pattern, patternLen, prefixStr, prefixStrLen,
                             nocase, &skipLongerMatches, 0))
        return 0;

    /* ...and must end with a '*' so it also covers any suffix beyond the
     * prefix. This check stays out of stringmatchlen_impl() to keep that
     * function's complexity manageable. */
    if (patternLen == 0 || pattern[patternLen-1] != '*')
        return 0;

    /* The trailing '*' only counts if it is not escaped: an odd number of
     * immediately preceding backslashes means it is escaped. */
    int escapes = 0;
    for (int pos = patternLen - 2; pos >= 0 && pattern[pos] == '\\'; pos--)
        escapes++;
    return (escapes % 2) == 0;
}
/* Glob-style pattern matching against a whole string; public wrapper that
 * sets up the shared early-exit flag for the recursive worker. */
int stringmatchlen(const char *pattern, int patternLen,
        const char *string, int stringLen, int nocase) {
    int skip = 0;
    return stringmatchlen_impl(pattern, patternLen,
                               string, stringLen, nocase, &skip, 0);
}
/* Like stringmatchlen() but for NUL-terminated pattern and string. */
int stringmatch(const char *pattern, const char *string, int nocase) {
    size_t plen = strlen(pattern);
    size_t slen = strlen(string);
    return stringmatchlen(pattern, plen, string, slen, nocase);
}
/* Fuzz stringmatchlen() trying to crash it with bad input: throw ten million
 * random pattern/string pairs at it and return the accumulated match count. */
int stringmatchlen_fuzz_test(void) {
    char str[32];
    char pat[32];
    int total_matches = 0;
    for (int cycles = 10000000; cycles > 0; cycles--) {
        /* Note: keep the rand() call order (string len, pattern len, string
         * bytes, pattern bytes) so runs are reproducible for a given seed. */
        int slen = rand() % sizeof(str);
        int plen = rand() % sizeof(pat);
        for (int j = 0; j < slen; j++) str[j] = rand() % 128;
        for (int j = 0; j < plen; j++) pat[j] = rand() % 128;
        total_matches += stringmatchlen(pat, plen, str, slen, 0);
    }
    return total_matches;
}
/* Convert a string representing an amount of memory into the number of
 * bytes, so for instance memtoull("1Gb") will return 1073741824 that is
 * (1024*1024*1024).
 *
 * Suffixes k/m/g are powers of 1000; kb/mb/gb are powers of 1024; "b" or no
 * suffix means bytes. Suffix matching is case insensitive. Negative values
 * are rejected.
 *
 * On parsing error, if *err is not NULL, it's set to 1, otherwise it's
 * set to 0. On error the function return value is 0, regardless of the
 * fact 'err' is NULL or not. */
unsigned long long memtoull(const char *p, int *err) {
    const char *u;
    char buf[128];
    long mul; /* unit multiplier */
    unsigned long long val;
    unsigned int digits;

    if (err) *err = 0;

    /* Search the first non digit character. */
    u = p;
    if (*u == '-') {
        if (err) *err = 1;
        return 0;
    }
    /* Cast to unsigned char: passing a plain (possibly negative) char to
     * isdigit() is undefined behavior per the C standard. */
    while(*u && isdigit((unsigned char)*u)) u++;
    if (*u == '\0' || !strcasecmp(u,"b")) {
        mul = 1;
    } else if (!strcasecmp(u,"k")) {
        mul = 1000;
    } else if (!strcasecmp(u,"kb")) {
        mul = 1024;
    } else if (!strcasecmp(u,"m")) {
        mul = 1000*1000;
    } else if (!strcasecmp(u,"mb")) {
        mul = 1024*1024;
    } else if (!strcasecmp(u,"g")) {
        mul = 1000L*1000*1000;
    } else if (!strcasecmp(u,"gb")) {
        mul = 1024L*1024*1024;
    } else {
        if (err) *err = 1;
        return 0;
    }

    /* Copy the digits into a buffer, we'll use strtoull() to convert
     * the digit (without the unit) into a number. */
    digits = u-p;
    if (digits >= sizeof(buf)) {
        if (err) *err = 1;
        return 0;
    }
    memcpy(buf,p,digits);
    buf[digits] = '\0';

    char *endptr;
    errno = 0;
    val = strtoull(buf,&endptr,10);
    if ((val == 0 && errno == EINVAL) || *endptr != '\0') {
        if (err) *err = 1;
        return 0;
    }
    /* NOTE(review): val*mul may wrap for huge inputs and errno==ERANGE from
     * strtoull() is accepted as-is -- preserved intentionally, but callers
     * should not feed attacker-controlled extreme values here. */
    return val*mul;
}
/* Search a memory buffer for any byte of the 'chars' set, like strpbrk()
 * but length-delimited on both sides (embedded NULs are fine).
 * Returns a pointer to the first byte found, or NULL if none occurs. */
const char *mempbrk(const char *s, size_t len, const char *chars, size_t charslen) {
    size_t pos, k;
    for (pos = 0; pos < len; pos++) {
        for (k = 0; k < charslen; k++) {
            if (s[pos] == chars[k])
                return s + pos;
        }
    }
    return NULL;
}
/* Modify the buffer in place, replacing every occurrence of a byte from the
 * 'from' set with the byte at the same index in the 'to' set (the first
 * matching 'from' entry wins). Always returns s. */
char *memmapchars(char *s, size_t len, const char *from, const char *to, size_t setlen) {
    for (size_t pos = 0; pos < len; pos++) {
        char c = s[pos];
        for (size_t m = 0; m < setlen; m++) {
            if (c != from[m]) continue;
            s[pos] = to[m];
            break;
        }
    }
    return s;
}
/* Return the number of digits of 'v' when converted to string in radix 10.
 * See ll2string() for more information.
 *
 * Iterative form of the classic four-digits-at-a-time count: each loop
 * iteration strips four decimal digits, so a 64-bit value needs at most
 * five iterations. */
uint32_t digits10(uint64_t v) {
    uint32_t count = 1;
    for (;;) {
        if (v < 10) return count;
        if (v < 100) return count + 1;
        if (v < 1000) return count + 2;
        if (v < 10000) return count + 3;
        v /= 10000UL;
        count += 4;
    }
}
/* Like digits10() but for signed values: counts the digits of the absolute
 * value plus one for the '-' sign when negative. */
uint32_t sdigits10(int64_t v) {
    if (v >= 0) return digits10(v);
    /* LLONG_MIN has no positive counterpart in int64_t, so compute its
     * magnitude via LLONG_MAX + 1 instead of negating. */
    uint64_t magnitude = (v == LLONG_MIN) ? ((uint64_t)LLONG_MAX)+1
                                          : (uint64_t)-v;
    return digits10(magnitude) + 1; /* +1 for the minus. */
}
/* Convert a long long into a string. Returns the number of
 * characters needed to represent the number.
 * If the buffer is not big enough to store the string, 0 is returned
 * (and the buffer is NUL terminated when it has room for at least one
 * byte). */
int ll2string(char *dst, size_t dstlen, long long svalue) {
    unsigned long long magnitude;
    int sign_len = 0;

    /* ull2string() works on unsigned 64-bit values for simplicity, so peel
     * off the sign first. LLONG_MIN's magnitude does not fit in a long long,
     * hence the special case. */
    if (svalue >= 0) {
        magnitude = svalue;
    } else {
        magnitude = (svalue != LLONG_MIN) ? (unsigned long long)-svalue
                                          : ((unsigned long long)LLONG_MAX)+1;
        if (dstlen < 2) {
            /* No room for even "-" plus the terminator. */
            if (dstlen > 0) dst[0] = '\0';
            return 0;
        }
        dst[0] = '-';
        dst++;
        dstlen--;
        sign_len = 1;
    }

    /* Emit the digits after the optional sign. */
    int body_len = ull2string(dst, dstlen, magnitude);
    if (body_len == 0) return 0;
    return body_len + sign_len;
}
/* Convert a unsigned long long into a string. Returns the number of
 * characters needed to represent the number.
 * If the buffer is not big enough to store the string, 0 is returned.
 *
 * Based on the following article (that apparently does not provide a
 * novel approach but only publicizes an already used technique):
 *
 * https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920 */
int ull2string(char *dst, size_t dstlen, unsigned long long value) {
    /* "00".."99" flattened: two digits are emitted per division by 100. */
    static const char digits[201] =
        "0001020304050607080910111213141516171819"
        "2021222324252627282930313233343536373839"
        "4041424344454647484950515253545556575859"
        "6061626364656667686970717273747576777879"
        "8081828384858687888990919293949596979899";

    /* Compute the length up front so we can fill the buffer backwards. */
    uint32_t length = digits10(value);
    if (length >= dstlen) {
        /* Buffer too small: still NUL terminate when possible. */
        if (dstlen > 0) dst[0] = '\0';
        return 0;
    }

    uint32_t pos = length - 1;
    dst[pos + 1] = '\0';
    while (value >= 100) {
        unsigned pair = (value % 100) * 2;
        value /= 100;
        dst[pos] = digits[pair + 1];
        dst[pos - 1] = digits[pair];
        pos -= 2;
    }

    /* Handle the remaining one or two digits. */
    if (value < 10) {
        dst[pos] = '0' + (uint32_t)value;
    } else {
        unsigned pair = (uint32_t)value * 2;
        dst[pos] = digits[pair + 1];
        dst[pos - 1] = digits[pair];
    }
    return length;
}
/* Convert a string into a long long. Returns 1 if the string could be parsed
 * into a (non-overflowing) long long, 0 otherwise. The value will be set to
 * the parsed value when appropriate.
 *
 * Note that this function demands that the string strictly represents
 * a long long: no spaces or other characters before or after the string
 * representing the number are accepted, nor zeroes at the start if not
 * for the string "0" representing the zero number.
 *
 * Because of its strictness, it is safe to use this function to check if
 * you can convert a string into a long long, and obtain back the string
 * from the number without any loss in the string representation. */
int string2ll(const char *s, size_t slen, long long *value) {
    const char *p = s;
    /* 'plen' tracks how many bytes of 's' have been consumed so far;
     * 'v' accumulates the unsigned magnitude of the number. */
    size_t plen = 0;
    int negative = 0;
    unsigned long long v;

    /* A string of zero length or excessive length is not a valid number. */
    if (plen == slen || slen >= LONG_STR_SIZE)
        return 0;

    /* Special case: first and only digit is 0. */
    if (slen == 1 && p[0] == '0') {
        if (value != NULL) *value = 0;
        return 1;
    }

    /* Handle negative numbers: just set a flag and continue like if it
     * was a positive number. Later convert into negative. */
    if (p[0] == '-') {
        negative = 1;
        p++; plen++;

        /* Abort on only a negative sign. */
        if (plen == slen)
            return 0;
    }

    /* First digit should be 1-9, otherwise the string should just be 0.
     * (This is what rejects leading zeroes like "0123".) */
    if (p[0] >= '1' && p[0] <= '9') {
        v = p[0]-'0';
        p++; plen++;
    } else {
        return 0;
    }

    /* Parse all the other digits, checking for overflow at every step. */
    while (plen < slen && p[0] >= '0' && p[0] <= '9') {
        if (v > (ULLONG_MAX / 10)) /* Overflow. */
            return 0;
        v *= 10;

        if (v > (ULLONG_MAX - (p[0]-'0'))) /* Overflow. */
            return 0;
        v += p[0]-'0';

        p++; plen++;
    }

    /* Return if not all bytes were used. */
    if (plen < slen)
        return 0;

    /* Convert to negative if needed, and do the final overflow check when
     * converting from unsigned long long to long long.
     * (-(LLONG_MIN+1))+1 is |LLONG_MIN| computed without signed overflow. */
    if (negative) {
        if (v > ((unsigned long long)(-(LLONG_MIN+1))+1)) /* Overflow. */
            return 0;
        if (value != NULL) *value = -v;
    } else {
        if (v > LLONG_MAX) /* Overflow. */
            return 0;
        if (value != NULL) *value = v;
    }
    return 1;
}
/* Helper function to convert a string to an unsigned long long value.
* The function attempts to use the faster string2ll() function inside
* Redis: if it fails, strtoull() is used instead. The function returns
* 1 if the conversion happened successfully or 0 if the number is
* invalid or out of range. */
int string2ull(const char *s, unsigned long long *value) {
long long ll;
if (string2ll(s,strlen(s),&ll)) {
if (ll < 0) return 0; /* Negative values are out of range. */
*value = ll;
return 1;
}
errno = 0;
char *endptr = NULL;
*value = strtoull(s,&endptr,10);
if (errno == EINVAL || errno == ERANGE || !(*s != '\0' && *endptr == '\0'))
return 0; /* strtoull() failed. */
return 1; /* Conversion done! */
}
/* Convert a string into a long. Returns 1 if the string could be parsed into a
* (non-overflowing) long, 0 otherwise. The value will be set to the parsed
* value when appropriate. */
int string2l(const char *s, size_t slen, long *lval) {
long long llval;
if (!string2ll(s,slen,&llval))
return 0;
if (llval < LONG_MIN || llval > LONG_MAX)
return 0;
*lval = (long)llval;
return 1;
}
/* return 1 if c>= start && c <= end, 0 otherwise*/
static int safe_is_c_in_range(char c, char start, char end) {
if (c >= start && c <= end) return 1;
return 0;
}
static int base_16_char_type(char c) {
if (safe_is_c_in_range(c, '0', '9')) return 0;
if (safe_is_c_in_range(c, 'a', 'f')) return 1;
if (safe_is_c_in_range(c, 'A', 'F')) return 2;
return -1;
}
/** This is an async-signal safe version of string2l to convert unsigned long to string.
* The function translates @param src until it reaches a value that is not 0-9, a-f or A-F, or @param we read slen characters.
* On successes writes the result to @param result_output and returns 1.
* if the string represents an overflow value, return -1. */
int string2ul_base16_async_signal_safe(const char *src, size_t slen, unsigned long *result_output) {
static char ascii_to_dec[] = {'0', 'a' - 10, 'A' - 10};
int char_type = 0;
size_t curr_char_idx = 0;
unsigned long result = 0;
int base = 16;
while ((-1 != (char_type = base_16_char_type(src[curr_char_idx]))) &&
curr_char_idx < slen) {
unsigned long curr_val = src[curr_char_idx] - ascii_to_dec[char_type];
if ((result > ULONG_MAX / base) || (result > (ULONG_MAX - curr_val)/base)) /* Overflow. */
return -1;
result = result * base + curr_val;
++curr_char_idx;
}
*result_output = result;
return 1;
}
/* Convert a string into a double. Returns 1 if the string could be parsed
* into a (non-overflowing) double, 0 otherwise. The value will be set to
* the parsed value when appropriate.
*
* Note that this function demands that the string strictly represents
* a double: no spaces or other characters before or after the string
* representing the number are accepted. */
int string2ld(const char *s, size_t slen, long double *dp) {
char buf[MAX_LONG_DOUBLE_CHARS];
long double value;
char *eptr;
if (slen == 0 || slen >= sizeof(buf)) return 0;
memcpy(buf,s,slen);
buf[slen] = '\0';
errno = 0;
value = strtold(buf, &eptr);
if (isspace(buf[0]) || eptr[0] != '\0' ||
(size_t)(eptr-buf) != slen ||
(errno == ERANGE &&
(value == HUGE_VAL || value == -HUGE_VAL || fpclassify(value) == FP_ZERO)) ||
errno == EINVAL ||
isnan(value))
return 0;
if (dp) *dp = value;
return 1;
}
/* Convert a string into a double. Returns 1 if the string could be parsed
* into a (non-overflowing) double, 0 otherwise. The value will be set to
* the parsed value when appropriate.
*
* Note that this function demands that the string strictly represents
* a double: no spaces or other characters before or after the string
* representing the number are accepted. */
int string2d(const char *s, size_t slen, double *dp) {
errno = 0;
char *eptr;
/* Fast path to reject empty strings, or strings starting by space explicitly */
if (unlikely(slen == 0 ||
isspace(((const char*)s)[0])))
return 0;
*dp = fast_float_strtod(s, &eptr);
/* If `fast_float_strtod` didn't consume full input, try `strtod`
* Given fast_float does not support hexadecimal strings representation */
if (unlikely((size_t)(eptr - (char*)s) != slen)) {
char *fallback_eptr;
*dp = strtod(s, &fallback_eptr);
if ((size_t)(fallback_eptr - (char*)s) != slen) return 0;
}
if (unlikely(errno == EINVAL ||
(errno == ERANGE &&
(*dp == HUGE_VAL || *dp == -HUGE_VAL || fpclassify(*dp) == FP_ZERO)) ||
isnan(*dp)))
return 0;
return 1;
}
/* Returns 1 if the double value can safely be represented in long long without
* precision loss, in which case the corresponding long long is stored in the out variable. */
int double2ll(double d, long long *out) {
#if (DBL_MANT_DIG >= 52) && (DBL_MANT_DIG <= 63) && (LLONG_MAX == 0x7fffffffffffffffLL)
/* Check if the float is in a safe range to be casted into a
* long long. We are assuming that long long is 64 bit here.
* Also we are assuming that there are no implementations around where
* double has precision < 52 bit.
*
* Under this assumptions we test if a double is inside a range
* where casting to long long is safe. Then using two castings we
* make sure the decimal part is zero. If all this is true we can use
* integer without precision loss.
*
* Note that numbers above 2^52 and below 2^63 use all the fraction bits as real part,
* and the exponent bits are positive, which means the "decimal" part must be 0.
* i.e. all double values in that range are representable as a long without precision loss,
* but not all long values in that range can be represented as a double.
* we only care about the first part here. */
if (d < (double)(-LLONG_MAX/2) || d > (double)(LLONG_MAX/2))
return 0;
long long ll = d;
if (ll == d) {
*out = ll;
return 1;
}
#endif
return 0;
}
/* Convert a double to a string representation. Returns the number of bytes
* required. The representation should always be parsable by strtod(3).
* This function does not support human-friendly formatting like ld2string
* does. It is intended mainly to be used inside t_zset.c when writing scores
* into a listpack representing a sorted set. */
int d2string(char *buf, size_t len, double value) {
if (isnan(value)) {
/* Libc in some systems will format nan in a different way,
* like nan, -nan, NAN, nan(char-sequence).
* So we normalize it and create a single nan form in an explicit way. */
len = snprintf(buf,len,"nan");
} else if (isinf(value)) {
/* Libc in odd systems (Hi Solaris!) will format infinite in a
* different way, so better to handle it in an explicit way. */
if (value < 0)
len = snprintf(buf,len,"-inf");
else
len = snprintf(buf,len,"inf");
} else if (value == 0) {
/* See: http://en.wikipedia.org/wiki/Signed_zero, "Comparisons". */
if (1.0/value < 0)
len = snprintf(buf,len,"-0");
else
len = snprintf(buf,len,"0");
} else {
long long lvalue;
/* Integer printing function is much faster, check if we can safely use it. */
if (double2ll(value, &lvalue))
len = ll2string(buf,len,lvalue);
else {
len = fpconv_dtoa(value, buf);
buf[len] = '\0';
}
}
return len;
}
/* Convert a double into a string with 'fractional_digits' digits after the dot precision.
* This is an optimized version of snprintf "%.<fractional_digits>f".
* We convert the double to long and multiply it by 10 ^ <fractional_digits> to shift
* the decimal places.
* Note that multiply it of input value by 10 ^ <fractional_digits> can overflow but on the scenario
* that we currently use within redis this that is not possible.
* After we get the long representation we use the logic from ull2string function on this file
* which is based on the following article:
* https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920
*
* Input values:
* char: the buffer to store the string representation
* dstlen: the buffer length
* dvalue: the input double
* fractional_digits: the number of fractional digits after the dot precision. between 1 and 17
*
* Return values:
* Returns the number of characters needed to represent the number.
* If the buffer is not big enough to store the string, 0 is returned.
*/
int fixedpoint_d2string(char *dst, size_t dstlen, double dvalue, int fractional_digits) {
if (fractional_digits < 1 || fractional_digits > 17)
goto err;
/* min size of 2 ( due to 0. ) + n fractional_digitits + \0 */
if ((int)dstlen < (fractional_digits+3))
goto err;
if (dvalue == 0) {
dst[0] = '0';
dst[1] = '.';
memset(dst + 2, '0', fractional_digits);
dst[fractional_digits+2] = '\0';
return fractional_digits + 2;
}
/* scale and round */
static double powers_of_ten[] = {1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0,
10000000.0, 100000000.0, 1000000000.0, 10000000000.0, 100000000000.0, 1000000000000.0,
10000000000000.0, 100000000000000.0, 1000000000000000.0, 10000000000000000.0,
100000000000000000.0 };
long long svalue = llrint(dvalue * powers_of_ten[fractional_digits]);
unsigned long long value;
/* write sign */
int negative = 0;
if (svalue < 0) {
if (svalue != LLONG_MIN) {
value = -svalue;
} else {
value = ((unsigned long long) LLONG_MAX)+1;
}
if (dstlen < 2)
goto err;
negative = 1;
dst[0] = '-';
dst++;
dstlen--;
} else {
value = svalue;
}
static const char digitsd[201] =
"0001020304050607080910111213141516171819"
"2021222324252627282930313233343536373839"
"4041424344454647484950515253545556575859"
"6061626364656667686970717273747576777879"
"8081828384858687888990919293949596979899";
/* Check length. */
uint32_t ndigits = digits10(value);
if (ndigits >= dstlen) goto err;
int integer_digits = ndigits - fractional_digits;
/* Fractional only check to avoid representing 0.7750 as .7750.
* This means we need to increment the length and store 0 as the first character.
*/
if (integer_digits < 1) {
dst[0] = '0';
integer_digits = 1;
}
dst[integer_digits] = '.';
int size = integer_digits + 1 + fractional_digits;
/* fill with 0 from fractional digits until size */
memset(dst + integer_digits + 1, '0', fractional_digits);
int next = size - 1;
while (value >= 100) {
int const i = (value % 100) * 2;
value /= 100;
dst[next] = digitsd[i + 1];
dst[next - 1] = digitsd[i];
next -= 2;
/* dot position */
if (next == integer_digits) {
next--;
}
}
/* Handle last 1-2 digits. */
if (value < 10) {
dst[next] = '0' + (uint32_t) value;
} else {
int i = (uint32_t) value * 2;
dst[next] = digitsd[i + 1];
dst[next - 1] = digitsd[i];
}
/* Null term. */
dst[size] = '\0';
return size + negative;
err:
/* force add Null termination */
if (dstlen > 0)
dst[0] = '\0';
return 0;
}
/* Trims off trailing zeros from a string representing a double. */
int trimDoubleString(char *buf, size_t len) {
if (strchr(buf,'.') != NULL) {
char *p = buf+len-1;
while(*p == '0') {
p--;
len--;
}
if (*p == '.') len--;
}
buf[len] = '\0';
return len;
}
/* Create a string object from a long double.
* If mode is humanfriendly it does not use exponential format and trims trailing
* zeroes at the end (may result in loss of precision).
* If mode is default exp format is used and the output of snprintf()
* is not modified (may result in loss of precision).
* If mode is hex hexadecimal format is used (no loss of precision)
*
* The function returns the length of the string or zero if there was not
* enough buffer room to store it. */
int ld2string(char *buf, size_t len, long double value, ld2string_mode mode) {
size_t l = 0;
if (isinf(value)) {
/* Libc in odd systems (Hi Solaris!) will format infinite in a
* different way, so better to handle it in an explicit way. */
if (len < 5) goto err; /* No room. 5 is "-inf\0" */
if (value > 0) {
memcpy(buf,"inf",3);
l = 3;
} else {
memcpy(buf,"-inf",4);
l = 4;
}
} else if (isnan(value)) {
/* Libc in some systems will format nan in a different way,
* like nan, -nan, NAN, nan(char-sequence).
* So we normalize it and create a single nan form in an explicit way. */
if (len < 4) goto err; /* No room. 4 is "nan\0" */
memcpy(buf, "nan", 3);
l = 3;
} else {
switch (mode) {
case LD_STR_AUTO:
l = snprintf(buf,len,"%.17Lg",value);
if (l+1 > len) goto err;; /* No room. */
break;
case LD_STR_HEX:
l = snprintf(buf,len,"%La",value);
if (l+1 > len) goto err; /* No room. */
break;
case LD_STR_HUMAN:
/* We use 17 digits precision since with 128 bit floats that precision
* after rounding is able to represent most small decimal numbers in a
* way that is "non surprising" for the user (that is, most small
* decimal numbers will be represented in a way that when converted
* back into a string are exactly the same as what the user typed.) */
l = snprintf(buf,len,"%.17Lf",value);
if (l+1 > len) goto err; /* No room. */
/* Now remove trailing zeroes after the '.' */
if (strchr(buf,'.') != NULL) {
char *p = buf+l-1;
while(*p == '0') {
p--;
l--;
}
if (*p == '.') l--;
}
if (l == 2 && buf[0] == '-' && buf[1] == '0') {
buf[0] = '0';
l = 1;
}
break;
default: goto err; /* Invalid mode. */
}
}
buf[l] = '\0';
return l;
err:
/* force add Null termination */
if (len > 0)
buf[0] = '\0';
return 0;
}
/* Get random bytes, attempts to get an initial seed from /dev/urandom and
* the uses a one way hash function in counter mode to generate a random
* stream. However if /dev/urandom is not available, a weaker seed is used.
*
* This function is not thread safe, since the state is global. */
void getRandomBytes(unsigned char *p, size_t len) {
/* Global state. */
static int seed_initialized = 0;
static unsigned char seed[64]; /* 512 bit internal block size. */
static uint64_t counter = 0; /* The counter we hash with the seed. */
if (!seed_initialized) {
/* Initialize a seed and use SHA1 in counter mode, where we hash
* the same seed with a progressive counter. For the goals of this
* function we just need non-colliding strings, there are no
* cryptographic security needs. */
FILE *fp = fopen("/dev/urandom","r");
if (fp == NULL || fread(seed,sizeof(seed),1,fp) != 1) {
/* Revert to a weaker seed, and in this case reseed again
* at every call.*/
for (unsigned int j = 0; j < sizeof(seed); j++) {
struct timeval tv;
gettimeofday(&tv,NULL);
pid_t pid = getpid();
seed[j] = tv.tv_sec ^ tv.tv_usec ^ pid ^ (long)fp;
}
} else {
seed_initialized = 1;
}
if (fp) fclose(fp);
}
while(len) {
/* This implements SHA256-HMAC. */
unsigned char digest[SHA256_BLOCK_SIZE];
unsigned char kxor[64];
unsigned int copylen =
len > SHA256_BLOCK_SIZE ? SHA256_BLOCK_SIZE : len;
/* IKEY: key xored with 0x36. */
memcpy(kxor,seed,sizeof(kxor));
for (unsigned int i = 0; i < sizeof(kxor); i++) kxor[i] ^= 0x36;
/* Obtain HASH(IKEY||MESSAGE). */
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx,kxor,sizeof(kxor));
sha256_update(&ctx,(unsigned char*)&counter,sizeof(counter));
sha256_final(&ctx,digest);
/* OKEY: key xored with 0x5c. */
memcpy(kxor,seed,sizeof(kxor));
for (unsigned int i = 0; i < sizeof(kxor); i++) kxor[i] ^= 0x5C;
/* Obtain HASH(OKEY || HASH(IKEY||MESSAGE)). */
sha256_init(&ctx);
sha256_update(&ctx,kxor,sizeof(kxor));
sha256_update(&ctx,digest,SHA256_BLOCK_SIZE);
sha256_final(&ctx,digest);
/* Increment the counter for the next iteration. */
counter++;
memcpy(p,digest,copylen);
len -= copylen;
p += copylen;
}
}
/* Generate the Redis "Run ID", a SHA1-sized random number that identifies a
* given execution of Redis, so that if you are talking with an instance
* having run_id == A, and you reconnect and it has run_id == B, you can be
* sure that it is either a different instance or it was restarted. */
void getRandomHexChars(char *p, size_t len) {
char *charset = "0123456789abcdef";
size_t j;
getRandomBytes((unsigned char*)p,len);
for (j = 0; j < len; j++) p[j] = charset[p[j] & 0x0F];
}
/* Given the filename, return the absolute path as an SDS string, or NULL
* if it fails for some reason. Note that "filename" may be an absolute path
* already, this will be detected and handled correctly.
*
* The function does not try to normalize everything, but only the obvious
* case of one or more "../" appearing at the start of "filename"
* relative path. */
sds getAbsolutePath(char *filename) {
char cwd[1024];
sds abspath;
sds relpath = sdsnew(filename);
relpath = sdstrim(relpath," \r\n\t");
if (relpath[0] == '/') return relpath; /* Path is already absolute. */
/* If path is relative, join cwd and relative path. */
if (getcwd(cwd,sizeof(cwd)) == NULL) {
sdsfree(relpath);
return NULL;
}
abspath = sdsnew(cwd);
if (sdslen(abspath) && abspath[sdslen(abspath)-1] != '/')
abspath = sdscat(abspath,"/");
/* At this point we have the current path always ending with "/", and
* the trimmed relative path. Try to normalize the obvious case of
* trailing ../ elements at the start of the path.
*
* For every "../" we find in the filename, we remove it and also remove
* the last element of the cwd, unless the current cwd is "/". */
while (sdslen(relpath) >= 3 &&
relpath[0] == '.' && relpath[1] == '.' && relpath[2] == '/')
{
sdsrange(relpath,3,-1);
if (sdslen(abspath) > 1) {
char *p = abspath + sdslen(abspath)-2;
int trimlen = 1;
while(*p != '/') {
p--;
trimlen++;
}
sdsrange(abspath,0,-(trimlen+1));
}
}
/* Finally glue the two parts together. */
abspath = sdscatsds(abspath,relpath);
sdsfree(relpath);
return abspath;
}
/*
* Gets the proper timezone in a more portable fashion
* i.e timezone variables are linux specific.
*/
long getTimeZone(void) {
#if defined(__linux__) || defined(__sun)
return timezone;
#else
struct timezone tz;
gettimeofday(NULL, &tz);
return tz.tz_minuteswest * 60L;
#endif
}
/* Return true if the specified path is just a file basename without any
* relative or absolute path. This function just checks that no / or \
* character exists inside the specified path, that's enough in the
* environments where Redis runs. */
int pathIsBaseName(char *path) {
return strchr(path,'/') == NULL && strchr(path,'\\') == NULL;
}
int fileExist(char *filename) {
struct stat statbuf;
return stat(filename, &statbuf) == 0 && S_ISREG(statbuf.st_mode);
}
int dirExists(char *dname) {
struct stat statbuf;
return stat(dname, &statbuf) == 0 && S_ISDIR(statbuf.st_mode);
}
int dirCreateIfMissing(char *dname) {
if (mkdir(dname, 0755) != 0) {
if (errno != EEXIST) {
return -1;
} else if (!dirExists(dname)) {
errno = ENOTDIR;
return -1;
}
}
return 0;
}
int dirRemove(char *dname) {
DIR *dir;
struct stat stat_entry;
struct dirent *entry;
char full_path[PATH_MAX + 1];
if ((dir = opendir(dname)) == NULL) {
return -1;
}
while ((entry = readdir(dir)) != NULL) {
if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue;
snprintf(full_path, sizeof(full_path), "%s/%s", dname, entry->d_name);
int fd = open(full_path, O_RDONLY|O_NONBLOCK);
if (fd == -1) {
closedir(dir);
return -1;
}
if (fstat(fd, &stat_entry) == -1) {
close(fd);
closedir(dir);
return -1;
}
close(fd);
if (S_ISDIR(stat_entry.st_mode) != 0) {
if (dirRemove(full_path) == -1) {
closedir(dir);
return -1;
}
continue;
}
if (unlink(full_path) != 0) {
closedir(dir);
return -1;
}
}
if (rmdir(dname) != 0) {
closedir(dir);
return -1;
}
closedir(dir);
return 0;
}
sds makePath(char *path, char *filename) {
return sdscatfmt(sdsempty(), "%s/%s", path, filename);
}
/* Given the filename, sync the corresponding directory.
*
* Usually a portable and safe pattern to overwrite existing files would be like:
* 1. create a new temp file (on the same file system!)
* 2. write data to the temp file
* 3. fsync() the temp file
* 4. rename the temp file to the appropriate name
* 5. fsync() the containing directory */
int fsyncFileDir(const char *filename) {
#ifdef _AIX
/* AIX is unable to fsync a directory */
return 0;
#endif
char temp_filename[PATH_MAX + 1];
char *dname;
int dir_fd;
if (strlen(filename) > PATH_MAX) {
errno = ENAMETOOLONG;
return -1;
}
/* In the glibc implementation dirname may modify their argument. */
memcpy(temp_filename, filename, strlen(filename) + 1);
dname = dirname(temp_filename);
dir_fd = open(dname, O_RDONLY);
if (dir_fd == -1) {
/* Some OSs don't allow us to open directories at all, just
* ignore the error in that case */
if (errno == EISDIR) {
return 0;
}
return -1;
}
/* Some OSs don't allow us to fsync directories at all, so we can ignore
* those errors. */
if (redis_fsync(dir_fd) == -1 && !(errno == EBADF || errno == EINVAL)) {
int save_errno = errno;
close(dir_fd);
errno = save_errno;
return -1;
}
close(dir_fd);
return 0;
}
/* free OS pages backed by file */
int reclaimFilePageCache(int fd, size_t offset, size_t length) {
#ifdef HAVE_FADVISE
int ret = posix_fadvise(fd, offset, length, POSIX_FADV_DONTNEED);
if (ret) {
errno = ret;
return -1;
}
return 0;
#else
UNUSED(fd);
UNUSED(offset);
UNUSED(length);
return 0;
#endif
}
/** An async signal safe version of fgets().
* Has the same behaviour as standard fgets(): reads a line from fd and stores it into the dest buffer.
* It stops when either (buff_size-1) characters are read, the newline character is read, or the end-of-file is reached,
* whichever comes first.
*
* On success, the function returns the same dest parameter. If the End-of-File is encountered and no characters have
* been read, the contents of dest remain unchanged and a null pointer is returned.
* If an error occurs, a null pointer is returned. */
char *fgets_async_signal_safe(char *dest, int buff_size, int fd) {
for (int i = 0; i < buff_size; i++) {
/* Read one byte */
ssize_t bytes_read_count = read(fd, dest + i, 1);
/* On EOF or error return NULL */
if (bytes_read_count < 1) {
return NULL;
}
/* we found the end of the line. */
if (dest[i] == '\n') {
break;
}
}
return dest;
}
static const char HEX[] = "0123456789abcdef";
static char *u2string_async_signal_safe(int _base, uint64_t val, char *buf) {
uint32_t base = (uint32_t) _base;
*buf-- = 0;
do {
*buf-- = HEX[val % base];
} while ((val /= base) != 0);
return buf + 1;
}
static char *i2string_async_signal_safe(int base, int64_t val, char *buf) {
char *orig_buf = buf;
const int32_t is_neg = (val < 0);
*buf-- = 0;
if (is_neg) {
val = -val;
}
if (is_neg && base == 16) {
int ix;
val -= 1;
for (ix = 0; ix < 16; ++ix)
buf[-ix] = '0';
}
do {
*buf-- = HEX[val % base];
} while ((val /= base) != 0);
if (is_neg && base == 10) {
*buf-- = '-';
}
if (is_neg && base == 16) {
int ix;
buf = orig_buf - 1;
for (ix = 0; ix < 16; ++ix, --buf) {
/* *INDENT-OFF* */
switch (*buf) {
case '0': *buf = 'f'; break;
case '1': *buf = 'e'; break;
case '2': *buf = 'd'; break;
case '3': *buf = 'c'; break;
case '4': *buf = 'b'; break;
case '5': *buf = 'a'; break;
case '6': *buf = '9'; break;
case '7': *buf = '8'; break;
case '8': *buf = '7'; break;
case '9': *buf = '6'; break;
case 'a': *buf = '5'; break;
case 'b': *buf = '4'; break;
case 'c': *buf = '3'; break;
case 'd': *buf = '2'; break;
case 'e': *buf = '1'; break;
case 'f': *buf = '0'; break;
}
/* *INDENT-ON* */
}
}
return buf + 1;
}
static const char *check_longlong_async_signal_safe(const char *fmt, int32_t *have_longlong) {
*have_longlong = 0;
if (*fmt == 'l') {
fmt++;
if (*fmt != 'l') {
*have_longlong = (sizeof(long) == sizeof(int64_t));
} else {
fmt++;
*have_longlong = 1;
}
}
return fmt;
}
int vsnprintf_async_signal_safe(char *to, size_t size, const char *format, va_list ap) {
char *start = to;
char *end = start + size - 1;
for (; *format; ++format) {
int32_t have_longlong = 0;
if (*format != '%') {
if (to == end) { /* end of buffer */
break;
}
*to++ = *format; /* copy ordinary char */
continue;
}
++format; /* skip '%' */
format = check_longlong_async_signal_safe(format, &have_longlong);
switch (*format) {
case 'd':
case 'i':
case 'u':
case 'x':
case 'p':
{
int64_t ival = 0;
uint64_t uval = 0;
if (*format == 'p')
have_longlong = (sizeof(void *) == sizeof(uint64_t));
if (have_longlong) {
if (*format == 'u') {
uval = va_arg(ap, uint64_t);
} else {
ival = va_arg(ap, int64_t);
}
} else {
if (*format == 'u') {
uval = va_arg(ap, uint32_t);
} else {
ival = va_arg(ap, int32_t);
}
}
{
char buff[22];
const int base = (*format == 'x' || *format == 'p') ? 16 : 10;
/* *INDENT-OFF* */
char *val_as_str = (*format == 'u') ?
u2string_async_signal_safe(base, uval, &buff[sizeof(buff) - 1]) :
i2string_async_signal_safe(base, ival, &buff[sizeof(buff) - 1]);
/* *INDENT-ON* */
/* Strip off "ffffffff" if we have 'x' format without 'll' */
if (*format == 'x' && !have_longlong && ival < 0) {
val_as_str += 8;
}
while (*val_as_str && to < end) {
*to++ = *val_as_str++;
}
continue;
}
}
case 's':
{
const char *val = va_arg(ap, char *);
if (!val) {
val = "(null)";
}
while (*val && to < end) {
*to++ = *val++;
}
continue;
}
}
}
*to = 0;
return (int)(to - start);
}
int snprintf_async_signal_safe(char *to, size_t n, const char *fmt, ...) {
int result;
va_list args;
va_start(args, fmt);
result = vsnprintf_async_signal_safe(to, n, fmt, args);
va_end(args);
return result;
}
#ifdef REDIS_TEST
#include <assert.h>
#include <sys/mman.h>
#include "testhelp.h"
static void test_string2ll(void) {
char buf[32];
long long v;
/* May not start with +. */
redis_strlcpy(buf,"+1",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 0);
/* Leading space. */
redis_strlcpy(buf," 1",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 0);
/* Trailing space. */
redis_strlcpy(buf,"1 ",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 0);
/* May not start with 0. */
redis_strlcpy(buf,"01",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 0);
redis_strlcpy(buf,"-1",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == -1);
redis_strlcpy(buf,"0",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == 0);
redis_strlcpy(buf,"1",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == 1);
redis_strlcpy(buf,"99",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == 99);
redis_strlcpy(buf,"-99",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == -99);
redis_strlcpy(buf,"-9223372036854775808",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == LLONG_MIN);
redis_strlcpy(buf,"-9223372036854775809",sizeof(buf)); /* overflow */
assert(string2ll(buf,strlen(buf),&v) == 0);
redis_strlcpy(buf,"9223372036854775807",sizeof(buf));
assert(string2ll(buf,strlen(buf),&v) == 1);
assert(v == LLONG_MAX);
redis_strlcpy(buf,"9223372036854775808",sizeof(buf)); /* overflow */
assert(string2ll(buf,strlen(buf),&v) == 0);
}
static void test_string2l(void) {
char buf[32];
long v;
/* May not start with +. */
redis_strlcpy(buf,"+1",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 0);
/* May not start with 0. */
redis_strlcpy(buf,"01",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 0);
redis_strlcpy(buf,"-1",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == -1);
redis_strlcpy(buf,"0",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == 0);
redis_strlcpy(buf,"1",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == 1);
redis_strlcpy(buf,"99",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == 99);
redis_strlcpy(buf,"-99",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == -99);
#if LONG_MAX != LLONG_MAX
redis_strlcpy(buf,"-2147483648",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == LONG_MIN);
redis_strlcpy(buf,"-2147483649",sizeof(buf)); /* overflow */
assert(string2l(buf,strlen(buf),&v) == 0);
redis_strlcpy(buf,"2147483647",sizeof(buf));
assert(string2l(buf,strlen(buf),&v) == 1);
assert(v == LONG_MAX);
redis_strlcpy(buf,"2147483648",sizeof(buf)); /* overflow */
assert(string2l(buf,strlen(buf),&v) == 0);
#endif
}
static void test_string2d(void) {
char buf[1024];
double v;
/* Valid hexadecimal value. */
redis_strlcpy(buf,"0x0p+0",sizeof(buf));
assert(string2d(buf,strlen(buf),&v) == 1);
assert(v == 0.0);
redis_strlcpy(buf,"0x1p+0",sizeof(buf));
assert(string2d(buf,strlen(buf),&v) == 1);
assert(v == 1.0);
/* Valid floating-point numbers */
redis_strlcpy(buf, "1.5", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 1);
assert(v == 1.5);
redis_strlcpy(buf, "-3.14", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 1);
assert(v == -3.14);
redis_strlcpy(buf, "2.0e10", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 1);
assert(v == 2.0e10);
redis_strlcpy(buf, "1e-3", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 1);
assert(v == 0.001);
/* Valid integer */
redis_strlcpy(buf, "42", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 1);
assert(v == 42.0);
/* Invalid cases */
/* Empty. */
redis_strlcpy(buf, "", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 0);
/* Starting by space. */
redis_strlcpy(buf, " 1.23", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 0);
/* Invalid hexadecimal format. */
redis_strlcpy(buf, "0x1.2g", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 0);
/* Hexadecimal NaN */
redis_strlcpy(buf, "0xNan", sizeof(buf));
assert(string2d(buf, strlen(buf), &v) == 0);
/* overflow. */
redis_strlcpy(buf,"23456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789",sizeof(buf));
assert(string2d(buf,strlen(buf),&v) == 0);
}
static void test_ll2string(void) {
char buf[32];
long long v;
int sz;
v = 0;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 1);
assert(!strcmp(buf, "0"));
v = -1;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 2);
assert(!strcmp(buf, "-1"));
v = 99;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 2);
assert(!strcmp(buf, "99"));
v = -99;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 3);
assert(!strcmp(buf, "-99"));
v = -2147483648;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 11);
assert(!strcmp(buf, "-2147483648"));
v = LLONG_MIN;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 20);
assert(!strcmp(buf, "-9223372036854775808"));
v = LLONG_MAX;
sz = ll2string(buf, sizeof buf, v);
assert(sz == 19);
assert(!strcmp(buf, "9223372036854775807"));
}
static void test_ld2string(void) {
char buf[32];
long double v;
int sz;
v = 0.0 / 0.0;
sz = ld2string(buf, sizeof(buf), v, LD_STR_AUTO);
assert(sz == 3);
assert(!strcmp(buf, "nan"));
}
static void test_fixedpoint_d2string(void) {
char buf[32];
double v;
int sz;
v = 0.0;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 6);
assert(!strcmp(buf, "0.0000"));
sz = fixedpoint_d2string(buf, sizeof buf, v, 1);
assert(sz == 3);
assert(!strcmp(buf, "0.0"));
/* set junk in buffer */
memset(buf,'A',32);
v = 0.0001;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 6);
assert(buf[sz] == '\0');
assert(!strcmp(buf, "0.0001"));
/* set junk in buffer */
memset(buf,'A',32);
v = 6.0642951598391699e-05;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 6);
assert(buf[sz] == '\0');
assert(!strcmp(buf, "0.0001"));
v = 0.01;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 6);
assert(!strcmp(buf, "0.0100"));
sz = fixedpoint_d2string(buf, sizeof buf, v, 1);
assert(sz == 3);
assert(!strcmp(buf, "0.0"));
v = -0.01;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 7);
assert(!strcmp(buf, "-0.0100"));
v = -0.1;
sz = fixedpoint_d2string(buf, sizeof buf, v, 1);
assert(sz == 4);
assert(!strcmp(buf, "-0.1"));
v = 0.1;
sz = fixedpoint_d2string(buf, sizeof buf, v, 1);
assert(sz == 3);
assert(!strcmp(buf, "0.1"));
v = 0.01;
sz = fixedpoint_d2string(buf, sizeof buf, v, 17);
assert(sz == 19);
assert(!strcmp(buf, "0.01000000000000000"));
v = 10.01;
sz = fixedpoint_d2string(buf, sizeof buf, v, 4);
assert(sz == 7);
assert(!strcmp(buf, "10.0100"));
/* negative tests */
sz = fixedpoint_d2string(buf, sizeof buf, v, 18);
assert(sz == 0);
sz = fixedpoint_d2string(buf, sizeof buf, v, 0);
assert(sz == 0);
sz = fixedpoint_d2string(buf, 1, v, 1);
assert(sz == 0);
}
#if defined(__linux__)
/* Since fadvise and mincore is only supported in specific platforms like
* Linux, we only verify the fadvise mechanism works in Linux */
/* Return nonzero if the first page of the file referred to by fd is
 * currently resident in the OS page cache.
 *
 * The page is mapped read-only and probed with mincore(); per mincore(2),
 * the least significant bit of the result byte is set when the page is
 * resident in memory. */
static int cache_exist(int fd) {
    unsigned char flag;
    void *m = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
    /* Bug fix: mmap() signals failure with MAP_FAILED ((void *)-1), not
     * NULL, so asserting on pointer truthiness never caught errors. */
    assert(m != MAP_FAILED);
    assert(mincore(m, 4096, &flag) == 0);
    munmap(m, 4096);
    /* the least significant bit of the byte will be set if the corresponding
     * page is currently resident in memory */
    return flag&1;
}
/* Verify that reclaimFilePageCache() evicts a file's pages from the OS
 * page cache after both a write and a read have populated it.
 * Linux-only: relies on mincore() via cache_exist(). */
static void test_reclaimFilePageCache(void) {
    char *tmpfile = "/tmp/redis-reclaim-cache-test";
    int fd = open(tmpfile, O_RDWR|O_CREAT, 0644);
    assert(fd >= 0);

    /* test write file */
    char buf[4] = "foo";
    assert(write(fd, buf, sizeof(buf)) > 0);
    assert(cache_exist(fd));
    /* fsync so the reclaim is not blocked by dirty pages. */
    assert(redis_fsync(fd) == 0);
    assert(reclaimFilePageCache(fd, 0, 0) == 0);
    assert(!cache_exist(fd));

    /* test read file */
    assert(pread(fd, buf, sizeof(buf), 0) > 0);
    assert(cache_exist(fd));
    assert(reclaimFilePageCache(fd, 0, 0) == 0);
    assert(!cache_exist(fd));

    /* Bug fix: the descriptor was previously leaked; close it before
     * removing the file. */
    close(fd);
    unlink(tmpfile);
    /* Bug fix: message previously read "reclaimFilePageCach". */
    printf("reclaimFilePageCache test is ok\n");
}
#endif
/* Entry point for the util.c test suite (invoked by the REDIS_TEST
 * harness). argc/argv are accepted only for harness-signature
 * compatibility; flags is consulted to skip the page-cache test when
 * running under Valgrind. */
int utilTest(int argc, char **argv, int flags) {
    UNUSED(argc);
    UNUSED(argv);
    UNUSED(flags);

    test_string2ll();
    test_string2l();
    test_string2d();
    test_ll2string();
    test_ld2string();
    test_fixedpoint_d2string();
#if defined(__linux__)
    /* Page-cache reclaim depends on real kernel caching behavior, which
     * Valgrind's virtualized environment does not reproduce. */
    if (!(flags & REDIS_TEST_VALGRIND)) {
        test_reclaimFilePageCache();
    }
#endif
    printf("Done testing util\n");
    return 0;
}
#endif | c | github | https://github.com/redis/redis | src/util.c |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
def get_selections(module, pkg):
    """Query ``debconf-show`` for *pkg* and return its current selections.

    Returns a dict mapping question names to their values; the leading
    ``'* '`` marker debconf-show prints for seen questions is stripped
    from the keys. Fails the module if the command exits non-zero.
    """
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    # Pass the argv list directly instead of ' '.join(cmd): run_command
    # then executes without re-splitting, so a package name containing
    # whitespace or shell-special characters cannot corrupt the command.
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg=err)

    selections = {}
    for line in out.splitlines():
        (key, value) = line.split(':', 1)
        # Seen questions are prefixed with '*'; normalize the key.
        selections[key.strip('*').strip()] = value.strip()
    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Preseed one debconf answer by piping it to debconf-set-selections.

    Returns the (rc, stdout, stderr) tuple from run_command.
    """
    cmd = [module.get_bin_path('debconf-set-selections', True)]
    # -u keeps the question's "seen" flag unset.
    if unseen:
        cmd.append('-u')

    # debconf expects lowercase booleans on its stdin.
    if vtype == 'boolean':
        value = {'True': 'true', 'False': 'false'}.get(value, value)

    selection_line = ' '.join([pkg, question, vtype, value])
    return module.run_command(cmd, data=selection_line)
def main():
    """Module entry point: query or set debconf selections for a package.

    With only ``name`` supplied, reports the package's current selections.
    With ``question``/``vtype``/``value``, preseeds that answer when it
    differs from the recorded one (respecting check mode and --diff).
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['pkg'], type='str'),
            question=dict(required=False, aliases=['setting', 'selection'], type='str'),
            vtype=dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title',
                                                            'text', 'seen']),
            value=dict(required=False, type='str', aliases=['answer']),
            unseen=dict(required=False, type='bool'),
        ),
        required_together=(['question', 'vtype', 'value'],),
        supports_check_mode=True,
    )

    # TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]

    prev = get_selections(module, pkg)

    changed = False
    msg = ""

    if question is not None:
        # Belt-and-braces: required_together should already enforce this.
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")

        # Only touch the debconf database when the answer is new or differs.
        if question not in prev or prev[question] != value:
            changed = True

        if changed:
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)

            # Narrow the reported previous state to the touched question.
            curr = {question: value}
            if question in prev:
                prev = {question: prev[question]}
            else:
                prev[question] = ''

            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}

            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)

    # Query-only path (or no change needed): report all selections.
    module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
# NOTE: the wildcard import of AnsibleModule at the bottom of the file is
# the historical Ansible module pattern (documentation constants stay at top).
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """PostgreSQL implementation of Django's database-introspection API."""

    # Maps type codes to Django Field types.
    # Keys are PostgreSQL pg_type OIDs as they appear in cursor.description.
    data_types_reverse = {
        16: 'BooleanField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField', # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
    }
    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        # Ordinary tables and views ('r', 'v') that are visible on the
        # current search path, excluding system and TOAST schemas.
        cursor.execute("""
            SELECT c.relname
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'v', '')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)""")
        return [row[0] for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        null_map = dict(cursor.fetchall())
        # LIMIT 1 query exists only to populate cursor.description; each
        # 6-tuple is then extended with the computed null_ok flag.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return [line[:6] + (null_map[line[0]]=='YES',)
                for line in cursor.description]
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # conkey/confkey hold 1-based column-number arrays for the
        # foreign-key constraint ('f') on the referencing/referenced side.
        cursor.execute("""
            SELECT con.conkey, con.confkey, c2.relname
            FROM pg_constraint con, pg_class c1, pg_class c2
            WHERE c1.oid = con.conrelid
                AND c2.oid = con.confrelid
                AND c1.relname = %s
                AND con.contype = 'f'""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            # row[0] and row[1] are single-item lists, so grab the single item.
            # Subtracting 1 converts PostgreSQL's 1-based column numbers to
            # the 0-based indexes this API promises.
            relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
        return relations
    def get_indexes(self, cursor, table_name):
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute("""
            SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND attr.attrelid = c.oid
                AND attr.attnum = idx.indkey[0]
                AND c.relname = %s""", [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
        return indexes
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of gRPC Python's application-layer API."""
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
# Toy (de)serializers used to verify that gRPC honors user-supplied codecs:
# requests are doubled on the wire, responses tripled, and each inverse
# transform recovers the original payload.
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]

# Fully-qualified method paths for the four RPC cardinalities under test.
_UNARY_UNARY = b'/test/UnaryUnary'
_UNARY_STREAM = b'/test/UnaryStream'
_STREAM_UNARY = b'/test/StreamUnary'
_STREAM_STREAM = b'/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
    """Concrete grpc.RpcMethodHandler populated field-by-field.

    Exactly one of unary_unary/unary_stream/stream_unary/stream_stream is
    expected to be non-None, matching the two streaming flags; the
    (de)serializers may be None for raw-bytes methods.
    """

    def __init__(
        self, request_streaming, response_streaming, request_deserializer,
        response_serializer, unary_unary, unary_stream, stream_unary,
        stream_stream):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        self.request_deserializer = request_deserializer
        self.response_serializer = response_serializer
        self.unary_unary = unary_unary
        self.unary_stream = unary_stream
        self.stream_unary = stream_unary
        self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
    """Routes the four test method paths onto the _Handler behaviors."""

    def __init__(self, handler):
        self._handler = handler

    def service(self, handler_call_details):
        method = handler_call_details.method
        if method == _UNARY_UNARY:
            # Raw bytes in/out; no custom codecs on the server side.
            return _MethodHandler(
                False, False, None, None, self._handler.handle_unary_unary,
                None, None, None)
        if method == _UNARY_STREAM:
            return _MethodHandler(
                False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
                self._handler.handle_unary_stream, None, None)
        if method == _STREAM_UNARY:
            return _MethodHandler(
                True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
                None, self._handler.handle_stream_unary, None)
        if method == _STREAM_STREAM:
            return _MethodHandler(
                True, True, None, None, None, None, None,
                self._handler.handle_stream_stream)
        # Unknown method: decline so the server reports UNIMPLEMENTED.
        return None
def _unary_unary_multi_callable(channel):
    # Default bytes-in/bytes-out unary-unary callable; no custom codecs.
    return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
    # Unary-stream callable wired with the toy request/response codecs.
    return channel.unary_stream(
        _UNARY_STREAM,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
    # Stream-unary callable wired with the toy request/response codecs.
    return channel.stream_unary(
        _STREAM_UNARY,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
    # Default bytes-in/bytes-out stream-stream callable; no custom codecs.
    return channel.stream_stream(_STREAM_STREAM)
class RPCTest(unittest.TestCase):
    """End-to-end tests of the public grpc API over a real insecure channel."""

    def setUp(self):
        self._control = test_control.PauseFailControl()
        self._handler = _Handler(self._control)
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)

        self._server = grpc.server((), self._server_pool)
        # Port 0 lets the OS pick a free port, returned for the client side.
        port = self._server.add_insecure_port(b'[::]:0')
        self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
        self._server.start()

        self._channel = grpc.insecure_channel('localhost:%d' % port)
        # NOTE(review): there is no tearDown, so the server, channel and
        # thread pool persist across tests -- confirm this is intentional.

    def testUnrecognizedMethod(self):
        # A method no handler serves must fail with UNIMPLEMENTED.
        request = b'abc'

        with self.assertRaises(grpc.RpcError) as exception_context:
            self._channel.unary_unary(b'NoSuchMethod')(request)

        self.assertEqual(
            grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code())

    def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
        request = b'\x07\x08'
        # Expected value comes from invoking the handler directly (no RPC).
        expected_response = self._handler.handle_unary_unary(request, None)

        multi_callable = _unary_unary_multi_callable(self._channel)
        response = multi_callable(
            request, metadata=(
                (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponse'),))

        self.assertEqual(expected_response, response)

    def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
        request = b'\x07\x08'
        expected_response = self._handler.handle_unary_unary(request, None)

        multi_callable = _unary_unary_multi_callable(self._channel)
        # with_call=True additionally returns the call object for status checks.
        response, call = multi_callable(
            request, metadata=(
                (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),),
            with_call=True)

        self.assertEqual(expected_response, response)
        self.assertIs(grpc.StatusCode.OK, call.code())

    def testSuccessfulUnaryRequestFutureUnaryResponse(self):
        request = b'\x07\x08'
        expected_response = self._handler.handle_unary_unary(request, None)

        multi_callable = _unary_unary_multi_callable(self._channel)
        response_future = multi_callable.future(
            request, metadata=(
                (b'test', b'SuccessfulUnaryRequestFutureUnaryResponse'),))
        response = response_future.result()

        self.assertEqual(expected_response, response)

    def testSuccessfulUnaryRequestStreamResponse(self):
        request = b'\x37\x58'
        expected_responses = tuple(self._handler.handle_unary_stream(request, None))

        multi_callable = _unary_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request,
            metadata=((b'test', b'SuccessfulUnaryRequestStreamResponse'),))
        responses = tuple(response_iterator)

        self.assertSequenceEqual(expected_responses, responses)
    def testSuccessfulStreamRequestBlockingUnaryResponse(self):
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(iter(requests), None)
        request_iterator = iter(requests)

        multi_callable = _stream_unary_multi_callable(self._channel)
        response = multi_callable(
            request_iterator,
            metadata=((b'test', b'SuccessfulStreamRequestBlockingUnaryResponse'),))

        self.assertEqual(expected_response, response)

    def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(iter(requests), None)
        request_iterator = iter(requests)

        multi_callable = _stream_unary_multi_callable(self._channel)
        # with_call=True also exposes the call object for status inspection.
        response, call = multi_callable(
            request_iterator,
            metadata=(
                (b'test', b'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),
            ), with_call=True)

        self.assertEqual(expected_response, response)
        self.assertIs(grpc.StatusCode.OK, call.code())

    def testSuccessfulStreamRequestFutureUnaryResponse(self):
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(iter(requests), None)
        request_iterator = iter(requests)

        multi_callable = _stream_unary_multi_callable(self._channel)
        response_future = multi_callable.future(
            request_iterator,
            metadata=(
                (b'test', b'SuccessfulStreamRequestFutureUnaryResponse'),))
        response = response_future.result()

        self.assertEqual(expected_response, response)

    def testSuccessfulStreamRequestStreamResponse(self):
        requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
        expected_responses = tuple(
            self._handler.handle_stream_stream(iter(requests), None))
        request_iterator = iter(requests)

        multi_callable = _stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request_iterator,
            metadata=((b'test', b'SuccessfulStreamRequestStreamResponse'),))
        responses = tuple(response_iterator)

        self.assertSequenceEqual(expected_responses, responses)

    def testSequentialInvocations(self):
        first_request = b'\x07\x08'
        # NOTE(review): b'\x0809' is three bytes (\x08, '0', '9'); possibly
        # b'\x08\x09' was intended. Harmless for this echo round-trip.
        second_request = b'\x0809'
        expected_first_response = self._handler.handle_unary_unary(
            first_request, None)
        expected_second_response = self._handler.handle_unary_unary(
            second_request, None)

        multi_callable = _unary_unary_multi_callable(self._channel)
        first_response = multi_callable(
            first_request, metadata=((b'test', b'SequentialInvocations'),))
        second_response = multi_callable(
            second_request, metadata=((b'test', b'SequentialInvocations'),))

        self.assertEqual(expected_first_response, first_response)
        self.assertEqual(expected_second_response, second_response)
    def testConcurrentBlockingInvocations(self):
        # Fan the same stream-unary RPC out across a thread pool and check
        # that every blocking invocation returns the expected response.
        pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(iter(requests), None)
        expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
        response_futures = [None] * test_constants.THREAD_CONCURRENCY

        multi_callable = _stream_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            request_iterator = iter(requests)
            response_future = pool.submit(
                multi_callable, request_iterator,
                metadata=((b'test', b'ConcurrentBlockingInvocations'),))
            response_futures[index] = response_future
        responses = tuple(
            response_future.result() for response_future in response_futures)
        pool.shutdown(wait=True)

        self.assertSequenceEqual(expected_responses, responses)

    def testConcurrentFutureInvocations(self):
        # Same fan-out, but via the native .future() API instead of threads.
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(iter(requests), None)
        expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
        response_futures = [None] * test_constants.THREAD_CONCURRENCY

        multi_callable = _stream_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            request_iterator = iter(requests)
            response_future = multi_callable.future(
                request_iterator,
                metadata=((b'test', b'ConcurrentFutureInvocations'),))
            response_futures[index] = response_future
        responses = tuple(
            response_future.result() for response_future in response_futures)

        self.assertSequenceEqual(expected_responses, responses)

    def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
        pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        request = b'\x67\x68'
        expected_response = self._handler.handle_unary_unary(request, None)
        response_futures = [None] * test_constants.THREAD_CONCURRENCY
        lock = threading.Lock()
        # Mutable cell lets wrap_future suppress RpcErrors raised by the
        # still-running tail of futures after the test proper has finished.
        test_is_running_cell = [True]
        def wrap_future(future):
            def wrap():
                try:
                    return future.result()
                except grpc.RpcError:
                    with lock:
                        if test_is_running_cell[0]:
                            raise
                    return None
            return wrap

        multi_callable = _unary_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            inner_response_future = multi_callable.future(
                request,
                metadata=(
                    (b'test',
                     b'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
            outer_response_future = pool.submit(wrap_future(inner_response_future))
            response_futures[index] = outer_response_future

        # Only wait for half the futures; the rest may still be in flight.
        some_completed_response_futures_iterator = itertools.islice(
            futures.as_completed(response_futures),
            test_constants.THREAD_CONCURRENCY // 2)
        for response_future in some_completed_response_futures_iterator:
            self.assertEqual(expected_response, response_future.result())
        with lock:
            test_is_running_cell[0] = False
    def testConsumingOneStreamResponseUnaryRequest(self):
        # Partially consuming a response stream then dropping it must not hang.
        request = b'\x57\x38'

        multi_callable = _unary_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request,
            metadata=(
                (b'test', b'ConsumingOneStreamResponseUnaryRequest'),))
        next(response_iterator)

    def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
        request = b'\x57\x38'

        multi_callable = _unary_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request,
            metadata=(
                (b'test', b'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
        # Consume only half the stream, then abandon the iterator.
        for _ in range(test_constants.STREAM_LENGTH // 2):
            next(response_iterator)

    def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
        requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)

        multi_callable = _stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request_iterator,
            metadata=(
                (b'test', b'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
        for _ in range(test_constants.STREAM_LENGTH // 2):
            next(response_iterator)

    def testConsumingTooManyStreamResponsesStreamRequest(self):
        requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)

        multi_callable = _stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request_iterator,
            metadata=(
                (b'test', b'ConsumingTooManyStreamResponsesStreamRequest'),))
        for _ in range(test_constants.STREAM_LENGTH):
            next(response_iterator)
        # An exhausted iterator keeps raising StopIteration and still
        # exposes metadata/status from the completed call.
        for _ in range(test_constants.STREAM_LENGTH):
            with self.assertRaises(StopIteration):
                next(response_iterator)

        self.assertIsNotNone(response_iterator.initial_metadata())
        self.assertIs(grpc.StatusCode.OK, response_iterator.code())
        self.assertIsNotNone(response_iterator.details())
        self.assertIsNotNone(response_iterator.trailing_metadata())
    def testCancelledUnaryRequestUnaryResponse(self):
        request = b'\x07\x17'

        multi_callable = _unary_unary_multi_callable(self._channel)
        # Pausing the servicer guarantees the RPC is still in flight when
        # it is cancelled.
        with self._control.pause():
            response_future = multi_callable.future(
                request,
                metadata=((b'test', b'CancelledUnaryRequestUnaryResponse'),))
            response_future.cancel()

        self.assertTrue(response_future.cancelled())
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())

    def testCancelledUnaryRequestStreamResponse(self):
        request = b'\x07\x19'

        multi_callable = _unary_stream_multi_callable(self._channel)
        with self._control.pause():
            response_iterator = multi_callable(
                request,
                metadata=((b'test', b'CancelledUnaryRequestStreamResponse'),))
            # Wait for the servicer to actually hit the pause point before
            # cancelling, so the cancellation races nothing.
            self._control.block_until_paused()
            response_iterator.cancel()

        with self.assertRaises(grpc.RpcError) as exception_context:
            next(response_iterator)
        self.assertIs(grpc.StatusCode.CANCELLED, exception_context.exception.code())
        self.assertIsNotNone(response_iterator.initial_metadata())
        self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
        self.assertIsNotNone(response_iterator.details())
        self.assertIsNotNone(response_iterator.trailing_metadata())

    def testCancelledStreamRequestUnaryResponse(self):
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)

        multi_callable = _stream_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request_iterator,
                metadata=((b'test', b'CancelledStreamRequestUnaryResponse'),))
            self._control.block_until_paused()
            response_future.cancel()

        self.assertTrue(response_future.cancelled())
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())

    def testCancelledStreamRequestStreamResponse(self):
        requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)

        multi_callable = _stream_stream_multi_callable(self._channel)
        with self._control.pause():
            response_iterator = multi_callable(
                request_iterator,
                metadata=((b'test', b'CancelledStreamRequestStreamResponse'),))
            response_iterator.cancel()

        with self.assertRaises(grpc.RpcError):
            next(response_iterator)
        self.assertIsNotNone(response_iterator.initial_metadata())
        self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
        self.assertIsNotNone(response_iterator.details())
        self.assertIsNotNone(response_iterator.trailing_metadata())
def testExpiredUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request, timeout=test_constants.SHORT_TIMEOUT,
metadata=((b'test', b'ExpiredUnaryRequestBlockingUnaryResponse'),),
with_call=True)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x17'
callback = _Callback()
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request, timeout=test_constants.SHORT_TIMEOUT,
metadata=((b'test', b'ExpiredUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code())
def testExpiredUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request, timeout=test_constants.SHORT_TIMEOUT,
metadata=((b'test', b'ExpiredUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code())
def testExpiredStreamRequestBlockingUnaryResponse(self):
requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator, timeout=test_constants.SHORT_TIMEOUT,
metadata=((b'test', b'ExpiredStreamRequestBlockingUnaryResponse'),))
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(
grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredStreamRequestFutureUnaryResponse(self):
    """A futures-style stream-unary RPC reports DEADLINE_EXCEEDED on every accessor."""
    messages = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
    done = _Callback()
    call = _stream_unary_multi_callable(self._channel)
    with self._control.pause():
        future = call.future(
            iter(messages),
            timeout=test_constants.SHORT_TIMEOUT,
            metadata=((b'test', b'ExpiredStreamRequestFutureUnaryResponse'),))
        future.add_done_callback(done)
        # done.value() blocks until the callback fires, i.e. until the RPC
        # has actually expired.
        completed = done.value()
        with self.assertRaises(grpc.RpcError) as raised:
            future.result()
    self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, future.code())
    self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, raised.exception.code())
    self.assertIsInstance(future.exception(), grpc.RpcError)
    self.assertIs(future, completed)
    self.assertIsNotNone(future.initial_metadata())
    self.assertIsNotNone(future.details())
    self.assertIsNotNone(future.trailing_metadata())
def testExpiredStreamRequestStreamResponse(self):
    """A stream-stream RPC must expire while the service is paused."""
    messages = tuple(b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
    call = _stream_stream_multi_callable(self._channel)
    with self._control.pause():
        with self.assertRaises(grpc.RpcError) as raised:
            stream = call(
                iter(messages),
                timeout=test_constants.SHORT_TIMEOUT,
                metadata=((b'test', b'ExpiredStreamRequestStreamResponse'),))
            next(stream)
    self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, raised.exception.code())
    self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, stream.code())
def testFailedUnaryRequestBlockingUnaryResponse(self):
    """A failing servicer surfaces UNKNOWN through the blocking unary API."""
    payload = b'\x37\x17'
    call = _unary_unary_multi_callable(self._channel)
    with self._control.fail():
        with self.assertRaises(grpc.RpcError) as raised:
            call(
                payload,
                metadata=((b'test', b'FailedUnaryRequestBlockingUnaryResponse'),),
                with_call=True)
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
def testFailedUnaryRequestFutureUnaryResponse(self):
    """A failing servicer surfaces UNKNOWN through the future-based unary API."""
    payload = b'\x37\x17'
    done = _Callback()
    call = _unary_unary_multi_callable(self._channel)
    with self._control.fail():
        future = call.future(
            payload,
            metadata=((b'test', b'FailedUnaryRequestFutureUnaryResponse'),))
        future.add_done_callback(done)
        # Blocks until the callback has run, i.e. the RPC has terminated.
        completed = done.value()
    with self.assertRaises(grpc.RpcError) as raised:
        future.result()
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
    self.assertIsInstance(future.exception(), grpc.RpcError)
    self.assertIs(grpc.StatusCode.UNKNOWN, future.exception().code())
    self.assertIs(future, completed)
def testFailedUnaryRequestStreamResponse(self):
    """A failing servicer surfaces UNKNOWN when iterating a response stream."""
    payload = b'\x37\x17'
    call = _unary_stream_multi_callable(self._channel)
    # NOTE: unlike the sibling tests, assertRaises wraps the fail() context
    # here; the nesting is preserved from the original.
    with self.assertRaises(grpc.RpcError) as raised:
        with self._control.fail():
            stream = call(
                payload,
                metadata=((b'test', b'FailedUnaryRequestStreamResponse'),))
            next(stream)
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
def testFailedStreamRequestBlockingUnaryResponse(self):
    """A failing servicer surfaces UNKNOWN through the blocking stream-unary API."""
    messages = tuple(b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
    call = _stream_unary_multi_callable(self._channel)
    with self._control.fail():
        with self.assertRaises(grpc.RpcError) as raised:
            call(
                iter(messages),
                metadata=((b'test', b'FailedStreamRequestBlockingUnaryResponse'),))
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
def testFailedStreamRequestFutureUnaryResponse(self):
    """A failing servicer surfaces UNKNOWN through the future stream-unary API."""
    messages = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
    done = _Callback()
    call = _stream_unary_multi_callable(self._channel)
    with self._control.fail():
        future = call.future(
            iter(messages),
            metadata=((b'test', b'FailedStreamRequestFutureUnaryResponse'),))
        future.add_done_callback(done)
        # Blocks until the callback has run, i.e. the RPC has terminated.
        completed = done.value()
    with self.assertRaises(grpc.RpcError) as raised:
        future.result()
    self.assertIs(grpc.StatusCode.UNKNOWN, future.code())
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
    self.assertIsInstance(future.exception(), grpc.RpcError)
    self.assertIs(future, completed)
def testFailedStreamRequestStreamResponse(self):
    """A failing servicer surfaces UNKNOWN on both the error and the iterator."""
    messages = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
    call = _stream_stream_multi_callable(self._channel)
    with self._control.fail():
        with self.assertRaises(grpc.RpcError) as raised:
            stream = call(
                iter(messages),
                metadata=((b'test', b'FailedStreamRequestStreamResponse'),))
            tuple(stream)
    self.assertIs(grpc.StatusCode.UNKNOWN, raised.exception.code())
    self.assertIs(grpc.StatusCode.UNKNOWN, stream.code())
def testIgnoredUnaryRequestFutureUnaryResponse(self):
    """Dropping the returned future must not raise or hang."""
    _unary_unary_multi_callable(self._channel).future(
        b'\x37\x17',
        metadata=((b'test', b'IgnoredUnaryRequestFutureUnaryResponse'),))
def testIgnoredUnaryRequestStreamResponse(self):
    """Dropping the returned response iterator must not raise or hang."""
    _unary_stream_multi_callable(self._channel)(
        b'\x37\x17',
        metadata=((b'test', b'IgnoredUnaryRequestStreamResponse'),))
def testIgnoredStreamRequestFutureUnaryResponse(self):
    """Dropping a stream-unary future must not raise or hang."""
    messages = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
    _stream_unary_multi_callable(self._channel).future(
        iter(messages),
        metadata=((b'test', b'IgnoredStreamRequestFutureUnaryResponse'),))
def testIgnoredStreamRequestStreamResponse(self):
    """Dropping a stream-stream response iterator must not raise or hang."""
    messages = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
    _stream_stream_multi_callable(self._channel)(
        iter(messages),
        metadata=((b'test', b'IgnoredStreamRequestStreamResponse'),))
if __name__ == '__main__':
unittest.main(verbosity=2) | unknown | codeparrot/codeparrot-clean | ||
import sys
from django.core.management.base import LabelCommand
from django.template import TemplateDoesNotExist, loader
from django_extensions.management.utils import signalcommand
def get_template_path(path):
    """Resolve a template name to the path of its source file.

    Returns None when no configured template loader can locate it.
    """
    try:
        template = loader.find_template(path)
        if template[1]:
            return template[1].name
        # Work around https://code.djangoproject.com/ticket/17199: the origin
        # may be missing, so query each source loader directly.
        for template_loader in loader.template_source_loaders:
            try:
                _source, origin = template_loader.load_template_source(path)
                return origin
            except TemplateDoesNotExist:
                continue
        raise TemplateDoesNotExist(path)
    except TemplateDoesNotExist:
        return None
class Command(LabelCommand):
help = "Finds the location of the given template by resolving its path"
args = "[template_path]"
label = 'template path'
@signalcommand
def handle_label(self, template_path, **options):
path = get_template_path(template_path)
if path is None:
sys.stderr.write("No template found\n")
sys.exit(1)
else:
print(path) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# NimCfitsio documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 15 15:56:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./exts'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['nim-domain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NimCfitsio'
copyright = u'2014, Maurizio Tomasi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NimCfitsiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NimCfitsio.tex', u'NimCfitsio Documentation',
u'Maurizio Tomasi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nimcfitsio', u'NimCfitsio Documentation',
[u'Maurizio Tomasi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NimCfitsio', u'NimCfitsio Documentation',
u'Maurizio Tomasi', 'NimCfitsio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | unknown | codeparrot/codeparrot-clean | ||
# Webhooks for external integrations.
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.lib.validator import check_dict, check_string
from zerver.models import UserProfile, MAX_SUBJECT_LENGTH
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any, Iterable, Optional, Text
@api_key_only_webhook_view('Splunk')
@has_request_variables
def api_splunk_webhook(request, user_profile,
payload=REQ(argument_type='body'), stream=REQ(default='splunk'),
topic=REQ(default=None)):
# type: (HttpRequest, UserProfile, Dict[str, Any], Text, Optional[Text]) -> HttpResponse
# use default values if expected data is not provided
search_name = payload.get('search_name', 'Missing search_name')
results_link = payload.get('results_link', 'Missing results_link')
host = payload.get('result', {}).get('host', 'Missing host')
source = payload.get('result', {}).get('source', 'Missing source')
raw = payload.get('result', {}).get('_raw', 'Missing _raw')
# if no topic provided, use search name but truncate if too long
if topic is None:
if len(search_name) >= MAX_SUBJECT_LENGTH:
topic = "{}...".format(search_name[:(MAX_SUBJECT_LENGTH - 3)])
else:
topic = search_name
# construct the message body
body = "Splunk alert from saved search"
body_template = ('\n[{search}]({link})\nhost: {host}'
'\nsource: {source}\n\nraw: {raw}')
body += body_template.format(search = search_name, link = results_link,
host = host, source = source, raw = raw)
# send the message
check_send_stream_message(user_profile, request.client, stream, topic, body)
return json_success() | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"fmt"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
)
var (
controlPlaneExample = cmdutil.Examples(`
# Generates all static Pod manifest files for control plane components,
# functionally equivalent to what is generated by kubeadm init.
kubeadm init phase control-plane all
# Generates all static Pod manifest files using options read from a configuration file.
kubeadm init phase control-plane all --config config.yaml
`)
controlPlanePhaseProperties = map[string]struct {
name string
short string
}{
kubeadmconstants.KubeAPIServer: {
name: "apiserver",
short: getPhaseDescription(kubeadmconstants.KubeAPIServer),
},
kubeadmconstants.KubeControllerManager: {
name: "controller-manager",
short: getPhaseDescription(kubeadmconstants.KubeControllerManager),
},
kubeadmconstants.KubeScheduler: {
name: "scheduler",
short: getPhaseDescription(kubeadmconstants.KubeScheduler),
},
}
)
// getPhaseDescription builds the one-line help text shown for a single
// control plane component subphase.
func getPhaseDescription(component string) string {
	return "Generates the " + component + " static Pod manifest"
}
// NewControlPlanePhase creates a kubeadm workflow phase that implements bootstrapping the control plane.
func NewControlPlanePhase() workflow.Phase {
	// "all" runs every sibling subphase; the remaining entries generate one
	// component manifest each.
	subphases := []workflow.Phase{
		{
			Name:           "all",
			Short:          "Generate all static Pod manifest files",
			InheritFlags:   getControlPlanePhaseFlags("all"),
			Example:        controlPlaneExample,
			RunAllSiblings: true,
		},
		newControlPlaneSubphase(kubeadmconstants.KubeAPIServer),
		newControlPlaneSubphase(kubeadmconstants.KubeControllerManager),
		newControlPlaneSubphase(kubeadmconstants.KubeScheduler),
	}
	return workflow.Phase{
		Name:   "control-plane",
		Short:  "Generate all static Pod manifest files necessary to establish the control plane",
		Phases: subphases,
		Run:    runControlPlanePhase,
	}
}
// newControlPlaneSubphase builds the workflow subphase that generates the
// static Pod manifest for one control plane component.
func newControlPlaneSubphase(component string) workflow.Phase {
	props := controlPlanePhaseProperties[component]
	return workflow.Phase{
		Name:         props.name,
		Short:        props.short,
		Run:          runControlPlaneSubphase(component),
		InheritFlags: getControlPlanePhaseFlags(component),
	}
}
// getControlPlanePhaseFlags returns the command line flags inherited by the
// named subphase; "all" inherits the union of every component's flags.
func getControlPlanePhaseFlags(name string) []string {
	all := name == "all"
	// Flags shared by every control plane subphase.
	flags := []string{
		options.CfgPath,
		options.CertificatesDir,
		options.KubernetesVersion,
		options.ImageRepository,
		options.Patches,
		options.DryRun,
	}
	// API server specific flags.
	if all || name == kubeadmconstants.KubeAPIServer {
		flags = append(flags,
			options.APIServerAdvertiseAddress,
			options.ControlPlaneEndpoint,
			options.APIServerBindPort,
			options.APIServerExtraArgs,
			options.FeatureGatesString,
			options.NetworkingServiceSubnet,
		)
	}
	// Controller manager specific flags.
	if all || name == kubeadmconstants.KubeControllerManager {
		flags = append(flags,
			options.ControllerManagerExtraArgs,
			options.NetworkingPodSubnet,
		)
	}
	// Scheduler specific flags.
	if all || name == kubeadmconstants.KubeScheduler {
		flags = append(flags, options.SchedulerExtraArgs)
	}
	return flags
}
// runControlPlanePhase validates the run data and reports the folder into
// which the subphases will write their static Pod manifests.
func runControlPlanePhase(rd workflow.RunData) error {
	initData, ok := rd.(InitData)
	if !ok {
		return errors.New("control-plane phase invoked with an invalid data struct")
	}
	fmt.Printf("[control-plane] Using manifest folder %q\n", initData.ManifestDir())
	return nil
}
func runControlPlaneSubphase(component string) func(c workflow.RunData) error {
return func(c workflow.RunData) error {
data, ok := c.(InitData)
if !ok {
return errors.New("control-plane phase invoked with an invalid data struct")
}
cfg := data.Cfg()
fmt.Printf("[control-plane] Creating static Pod manifest for %q\n", component)
return controlplane.CreateStaticPodFiles(data.ManifestDir(), data.PatchesDir(), &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, data.DryRun(), component)
}
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kubeadm/app/cmd/phases/init/controlplane.go |
from django.db import models
class NiceModel(models.Model):
pass | python | github | https://github.com/django/django | tests/proxy_model_inheritance/app2/models.py |
#!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import digital_swig
class test_lms_dd_equalizer(gr_unittest.TestCase):
    """QA for the LMS decision-directed equalizer block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def transform(self, src_data, gain, const):
        """Run src_data through an lms_dd_equalizer_cc and return its output."""
        source = gr.vector_source_c(src_data, False)
        equalizer = digital_swig.lms_dd_equalizer_cc(4, gain, 1, const.base())
        sink = gr.vector_sink_c()
        self.tb.connect(source, equalizer, sink)
        self.tb.run()
        return sink.data()

    def test_001_identity(self):
        # Constant-modulus signal, so the equalizer should pass it unchanged
        # once the taps have converged.
        const = digital_swig.constellation_qpsk()
        src_data = const.points() * 1000
        settle = 100  # settling time: samples to discard while taps converge
        expected_data = src_data[settle:]
        result = self.transform(src_data, 0.1, const)[settle:]
        self.assertComplexTuplesAlmostEqual(expected_data, result, 5)
if __name__ == "__main__":
gr_unittest.run(test_lms_dd_equalizer, "test_lms_dd_equalizer.xml") | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Guillaume Bour, guillaume@bour.cc
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Gerhard Lausser, gerhard.lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
# Thibault Cohen, titilambert@gmail.com
# Jean Gabes, naparuba@gmail.com
# Christophe Simon, geektophe@gmail.com
# Romain Forlot, rforlot@yahoo.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from itemgroup import Itemgroup, Itemgroups
from alignak.util import get_obj_name
from alignak.property import StringProp, IntegerProp
from alignak.log import logger
class Hostgroup(Itemgroup):
"""A named group of hosts defined by hostgroup configuration objects.

Extends Itemgroup with hostgroup-specific properties (notes, URLs, realm)
and with recursive expansion of nested hostgroup_members definitions.
"""
id = 1 # zero is always a little bit special... like in database
my_type = 'hostgroup'
# Configuration properties accepted by a hostgroup definition.
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'hostgroup_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(fill_brok=['full_status']),
'notes': StringProp(default='', fill_brok=['full_status']),
'notes_url': StringProp(default='', fill_brok=['full_status']),
'action_url': StringProp(default='', fill_brok=['full_status']),
'realm': StringProp(default='', fill_brok=['full_status'],
conf_send_preparation=get_obj_name),
})
# Macro names exposed by this object, mapped to the attribute they read.
macros = {
'HOSTGROUPALIAS': 'alias',
'HOSTGROUPMEMBERS': 'members',
'HOSTGROUPNOTES': 'notes',
'HOSTGROUPNOTESURL': 'notes_url',
'HOSTGROUPACTIONURL': 'action_url'
}
def get_name(self):
"""Return this group's hostgroup_name."""
return self.hostgroup_name
def get_hosts(self):
"""Return the group's members, or [] when none have been set."""
if getattr(self, 'members', None) is not None:
return self.members
else:
return []
def get_hostgroup_members(self):
"""Return the names listed in hostgroup_members (stripped), or []."""
if self.has('hostgroup_members'):
return [m.strip() for m in self.hostgroup_members.split(',')]
else:
return []
# We fill full properties with template ones if needed.
# Because a nested hostgroup may not have its members resolved yet,
# we call get_hosts_by_explosion on it recursively.
def get_hosts_by_explosion(self, hostgroups):
"""Recursively expand nested hostgroup_members and return all hosts.

Relies on the already_explode / rec_tag markers (reset by the caller,
Hostgroups.explode) to detect definition loops instead of recursing
forever.
"""
# Tag this group so it will not be exploded again if a child
# calls back into it.
self.already_explode = True
# rec_tag is reset to False for every group before an explosion run,
# so finding it True here means we re-entered ourselves: a loop in
# the hostgroup definitions.
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
# Not a loop: tag ourselves and recurse into each sub-group.
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hg = hostgroups.find_by_name(hg_mbr.strip())
if hg is not None:
value = hg.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_string_member(value)
return self.get_hosts()
class Hostgroups(Itemgroups):
name_property = "hostgroup_name" # is used for finding hostgroups
inner_class = Hostgroup
def get_members_by_name(self, hgname):
    """Return the host members of the hostgroup named hgname ([] if unknown)."""
    group = self.find_by_name(hgname)
    return [] if group is None else group.get_hosts()
def linkify(self, hosts=None, realms=None):
    """Resolve member host names and realm names into object references."""
    self.linkify_hg_by_hst(hosts)
    self.linkify_hg_by_realms(realms)
# For each hostgroup, replace every member host name with a reference to
# the actual host object, and register the group on each member host.
def linkify_hg_by_hst(self, hosts):
"""Resolve member names to host objects for every hostgroup.

'*' expands to all hosts; unknown names are recorded via
add_string_unknown_member for later error reporting.
"""
for hg in self:
mbrs = hg.get_hosts()
# The new member list, as host objects instead of names.
new_mbrs = []
for mbr in mbrs:
mbr = mbr.strip() # protect with strip at the begining so don't care about spaces
if mbr == '': # void entry, skip this
continue
elif mbr == '*':
new_mbrs.extend(hosts)
else:
h = hosts.find_by_name(mbr)
if h is not None:
new_mbrs.append(h)
else:
hg.add_string_unknown_member(mbr)
# Make members uniq
new_mbrs = list(set(new_mbrs))
# Swap the name list for the resolved object list.
hg.replace_members(new_mbrs)
# Register this group on each of its member hosts...
for h in hg.members:
h.hostgroups.append(hg)
# ...and keep the host's group list free of duplicates.
h.hostgroups = list(set(h.hostgroups))
# More than an explode function, but we need to already
# have members resolved, so it runs as part of linkify.
# The group's realm is propagated to its member hosts, but must not
# override a realm value a host already set explicitly.
def linkify_hg_by_realms(self, realms):
"""Resolve each group's realm name and propagate it to member hosts.

Unknown realm names are recorded as configuration errors; hosts that
already carry a non-default realm keep it (a mismatch only warns).
"""
for hg in self:
if not hasattr(hg, 'realm'):
continue
# Maybe the value is void?
if not hg.realm.strip():
continue
r = realms.find_by_name(hg.realm.strip())
if r is not None:
hg.realm = r
logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
else:
err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
hg.configuration_errors.append(err)
hg.realm = None
continue
for h in hg:
if h is None:
continue
if h.realm is None or h.got_default_realm: # default value not hasattr(h, 'realm'):
logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup "
"rule (%s)", hg.realm.get_name(), h.get_name(), hg.get_name())
h.realm = hg.realm
else:
if h.realm != hg.realm:
logger.warning("[hostgroups] host %s it not in the same realm than it's "
"hostgroup %s", h.get_name(), hg.get_name())
def add_member(self, hname, hgname):
    """Append host name hname to hostgroup hgname, creating the group if missing."""
    group = self.find_by_name(hgname)
    if group is None:
        # Unknown group: create it on the fly with the host as first member.
        group = Hostgroup({'hostgroup_name': hgname, 'alias': hgname, 'members': hname})
        self.add(group)
    else:
        group.add_string_member(hname)
# Use to fill members with hostgroup_members
def explode(self):
# We do not want a same hg to be explode again and again
# so we tag it
for tmp_hg in self.items.values():
tmp_hg.already_explode = False
for hg in self.items.values():
if hg.has('hostgroup_members') and not hg.already_explode:
# get_hosts_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_hg in self.items.values():
tmp_hg.rec_tag = False
hg.get_hosts_by_explosion(self)
# We clean the tags
for tmp_hg in self.items.values():
if hasattr(tmp_hg, 'rec_tag'):
del tmp_hg.rec_tag
del tmp_hg.already_explode | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// DeprecatedControllerOptions holds the DeprecatedController options, those option are deprecated.
// TODO remove these fields once the deprecated flags are removed.
type DeprecatedControllerOptions struct {
	// Embedded deprecated-controller section of the shared component
	// configuration; may be nil, so methods nil-check the receiver.
	*kubectrlmgrconfig.DeprecatedControllerConfiguration
}
// AddFlags adds flags related to DeprecatedController for controller manager to the specified FlagSet.
func (o *DeprecatedControllerOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}
	// No deprecated flags remain to register on fs.
}
// ApplyTo fills up DeprecatedController config with options.
func (o *DeprecatedControllerOptions) ApplyTo(cfg *kubectrlmgrconfig.DeprecatedControllerConfiguration) error {
	if o == nil {
		return nil
	}
	// Nothing left to copy into cfg; all deprecated settings were removed.
	return nil
}
// Validate checks validation of DeprecatedControllerOptions.
func (o *DeprecatedControllerOptions) Validate() []error {
if o == nil {
return nil
}
errs := []error{}
return errs
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kube-controller-manager/app/options/deprecatedcontroller.go |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
evn_bgp:
description:
- Enables EVN BGP.
required: false
choices: ['enable', 'disable']
default: null
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_server:
description:
- Configures the local device as the router reflector (RR) on the EVN network.
required: false
choices: ['enable', 'disable']
default: null
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
required: false
choices: ['enable', 'disable']
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
required: false
choices: ['enable', 'disable']
default: null
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
required: false
choices: ['bgp','none']
default: null
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when *test_cfg* occurs as a substring of *cmp_cfg*.

    Either argument being empty/None yields False.
    """
    if not cmp_cfg or not test_cfg:
        return False
    # `in` already evaluates to a bool; no wrapping needed.
    return test_cfg in cmp_cfg
def is_valid_v4addr(addr):
    """Return True when *addr* is a dotted-quad IPv4 string: exactly four
    numeric octets, each no greater than 255 (leading zeros tolerated)."""
    octets = addr.split('.')
    if len(octets) != 4:
        return False
    return all(part.isdigit() and int(part) <= 255 for part in octets)
def get_evn_peers(config):
    """Extract the de-duplicated list of EVN BGP peer IPs from *config*.

    Returns None when no ``peer <ip>`` statement is present.
    """
    # BUGFIX: dots are escaped so '.' cannot match arbitrary characters
    # (the old pattern also matched strings like "peer 1x2x3x4").
    found = re.findall(r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", config)
    if not found:
        return None
    # De-duplicate; note the resulting order is unspecified.
    return list(set(found))
def get_evn_srouce(config):
    """Return the EVN BGP source-address found in *config*, or None.

    (Function name keeps its historical typo for caller compatibility.)
    """
    # BUGFIX: escape the dots so '.' cannot match arbitrary characters.
    found = re.findall(
        r"source-address ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", config)
    if not found:
        return None
    return found[0]
def get_evn_reflect_client(config):
    """Return the list of peer IPs configured as reflect-client, or None."""
    # BUGFIX: escape the dots so '.' cannot match arbitrary characters.
    found = re.findall(
        r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\s*reflect-client", config)
    if not found:
        return None
    return list(found)
class VxlanArp(object):
    """Manages ARP attributes of VXLAN on CloudEngine devices.

    Reads the desired state from module parameters, compares it with the
    running configuration, and issues the CLI commands needed to converge.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.evn_bgp = self.module.params['evn_bgp']
        self.evn_source_ip = self.module.params['evn_source_ip']
        self.evn_peer_ip = self.module.params['evn_peer_ip']
        self.evn_server = self.module.params['evn_server']
        self.evn_reflect_client = self.module.params['evn_reflect_client']
        self.vbdif_name = self.module.params['vbdif_name']
        self.arp_collect_host = self.module.params['arp_collect_host']
        self.host_collect_protocol = self.module.params[
            'host_collect_protocol']
        self.bridge_domain_id = self.module.params['bridge_domain_id']
        self.arp_suppress = self.module.params['arp_suppress']
        self.state = self.module.params['state']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.config = ""  # current device configuration
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """Init the AnsibleModule with parameter dependency rules."""

        required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
        self.module = AnsibleModule(argument_spec=self.spec,
                                    required_together=required_together,
                                    supports_check_mode=True)

    def cli_load_config(self, commands):
        """Load config by cli; no-op in check mode."""

        if not self.module.check_mode:
            load_config(self.module, commands)

    def get_current_config(self):
        """Get the sections of the running config relevant to this module."""

        flags = list()
        exp = "| ignore-case section include evn bgp|host collect protocol bgp"
        if self.vbdif_name:
            exp += "|^interface %s$" % self.vbdif_name
        if self.bridge_domain_id:
            exp += "|^bridge-domain %s$" % self.bridge_domain_id
        flags.append(exp)
        config = get_config(self.module, flags)
        return config

    def cli_add_command(self, command, undo=False):
        """Add a command to self.updates_cmd and self.commands.

        When *undo* is True the command is prefixed with "undo" (except for
        view-navigation commands "quit"/"return", which are never undone and
        never reported in the updates list).
        """

        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command

        self.commands.append(cmd)  # set to device
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)  # show updates result

    def config_bridge_domain(self):
        """Manage ARP broadcast suppression in a bridge domain."""

        if not self.bridge_domain_id:
            return

        # bridge-domain bd-id
        # [undo] arp broadcast-suppress enable
        cmd = "bridge-domain %s" % self.bridge_domain_id
        if not is_config_exist(self.config, cmd):
            self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)

        cmd = "arp broadcast-suppress enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_suppress == "enable" and not exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_suppress == "disable" and exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_evn_bgp(self):
        """Enable EVN BGP and configure commands in the evn-bgp view."""

        evn_bgp_view = False    # True once we have entered the evn bgp view
        evn_bgp_enable = False  # True when EVN BGP is (or will be) enabled

        cmd = "evn bgp"
        exist = is_config_exist(self.config, cmd)
        if self.evn_bgp == "enable" or exist:
            evn_bgp_enable = True

        # [undo] evn bgp
        if self.evn_bgp:
            if self.evn_bgp == "enable" and not exist:
                self.cli_add_command(cmd)
                evn_bgp_view = True
            elif self.evn_bgp == "disable" and exist:
                # disabling removes the whole view; nothing more to do
                self.cli_add_command(cmd, undo=True)
                return

        # [undo] source-address ip-address
        if evn_bgp_enable and self.evn_source_ip:
            cmd = "source-address %s" % self.evn_source_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.state == "absent" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        # [undo] peer ip-address
        # [undo] peer ipv4-address reflect-client
        if evn_bgp_enable and self.evn_peer_ip:
            cmd = "peer %s" % self.evn_peer_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present":
                if not exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd)
                    if self.evn_reflect_client == "enable":
                        self.cli_add_command(
                            "peer %s reflect-client" % self.evn_peer_ip)
                else:
                    # Peer already present; only reconcile the
                    # reflect-client flag when requested.
                    if self.evn_reflect_client:
                        cmd = "peer %s reflect-client" % self.evn_peer_ip
                        exist = is_config_exist(self.config, cmd)
                        if self.evn_reflect_client == "enable" and not exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd)
                        elif self.evn_reflect_client == "disable" and exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd, undo=True)
            else:
                if exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd, undo=True)

        # [undo] server enable
        if evn_bgp_enable and self.evn_server:
            cmd = "server enable"
            exist = is_config_exist(self.config, cmd)
            if self.evn_server == "enable" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.evn_server == "disable" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        if evn_bgp_view:
            self.cli_add_command("quit")

    def config_vbdif(self):
        """Configure ARP host collection in the VBDIF interface view."""

        # interface vbdif bd-id
        # [undo] arp collect host enable
        cmd = "interface %s" % self.vbdif_name.lower().capitalize()
        exist = is_config_exist(self.config, cmd)
        if not exist:
            self.module.fail_json(
                msg="Error: Interface %s does not exist." % self.vbdif_name)

        cmd = "arp collect host enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_collect_host == "enable" and not exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_collect_host == "disable" and exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_host_collect_protocal(self):
        """Enable EVN BGP or BGP EVPN to advertise host information."""

        # [undo] host collect protocol bgp
        cmd = "host collect protocol bgp"
        exist = is_config_exist(self.config, cmd)
        if self.state == "present":
            if self.host_collect_protocol == "bgp" and not exist:
                self.cli_add_command(cmd)
            elif self.host_collect_protocol == "none" and exist:
                self.cli_add_command(cmd, undo=True)
        else:
            if self.host_collect_protocol == "bgp" and exist:
                self.cli_add_command(cmd, undo=True)

    def is_valid_vbdif(self, ifname):
        """Return True when *ifname* is a valid VBDIF interface name."""

        if not ifname.upper().startswith('VBDIF'):
            return False
        # BUGFIX: validate the argument itself rather than self.vbdif_name.
        bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
        if not bdid.isdigit():
            return False
        if int(bdid) < 1 or int(bdid) > 16777215:
            return False

        return True

    def check_params(self):
        """Check all input params; fail the module on invalid values."""

        # bridge domain id check
        if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: Bridge domain id is not digit.")
            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
                self.module.fail_json(
                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")

        # evn_source_ip check
        if self.evn_source_ip:
            if not is_valid_v4addr(self.evn_source_ip):
                self.module.fail_json(msg="Error: evn_source_ip is invalid.")

        # evn_peer_ip check
        if self.evn_peer_ip:
            if not is_valid_v4addr(self.evn_peer_ip):
                self.module.fail_json(msg="Error: evn_peer_ip is invalid.")

        # vbdif_name check (normalize to "Vbdif<N>" form first)
        if self.vbdif_name:
            self.vbdif_name = self.vbdif_name.replace(
                " ", "").lower().capitalize()
            if not self.is_valid_vbdif(self.vbdif_name):
                self.module.fail_json(msg="Error: vbdif_name is invalid.")

        # evn_reflect_client and evn_peer_ip must set at the same time
        if self.evn_reflect_client and not self.evn_peer_ip:
            self.module.fail_json(
                msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")

        # evn_server and evn_reflect_client can not set at the same time
        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
            self.module.fail_json(
                msg="Error: evn_server and evn_reflect_client can not set at the same time.")

    def get_proposed(self):
        """Fill self.proposed with the parameters the user supplied."""

        if self.evn_bgp:
            self.proposed["evn_bgp"] = self.evn_bgp
        if self.evn_source_ip:
            self.proposed["evn_source_ip"] = self.evn_source_ip
        if self.evn_peer_ip:
            self.proposed["evn_peer_ip"] = self.evn_peer_ip
        if self.evn_server:
            self.proposed["evn_server"] = self.evn_server
        if self.evn_reflect_client:
            self.proposed["evn_reflect_client"] = self.evn_reflect_client
        if self.arp_collect_host:
            self.proposed["arp_collect_host"] = self.arp_collect_host
        if self.host_collect_protocol:
            self.proposed["host_collect_protocol"] = self.host_collect_protocol
        if self.arp_suppress:
            self.proposed["arp_suppress"] = self.arp_suppress
        if self.vbdif_name:
            # BUGFIX: was mistakenly reporting self.evn_peer_ip here.
            self.proposed["vbdif_name"] = self.vbdif_name
        if self.bridge_domain_id:
            self.proposed["bridge_domain_id"] = self.bridge_domain_id
        self.proposed["state"] = self.state

    def get_existing(self):
        """Fill self.existing from the current configuration."""

        evn_bgp_exist = is_config_exist(self.config, "evn bgp")
        if evn_bgp_exist:
            self.existing["evn_bgp"] = "enable"
        else:
            self.existing["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(self.config, "server enable"):
                self.existing["evn_server"] = "enable"
            else:
                self.existing["evn_server"] = "disable"
            self.existing["evn_source_ip"] = get_evn_srouce(self.config)
            self.existing["evn_peer_ip"] = get_evn_peers(self.config)
            self.existing["evn_reflect_client"] = get_evn_reflect_client(
                self.config)

        # BUGFIX: this state was stored under "host_collect_protocol" and
        # immediately overwritten below; report it under its own key.
        if is_config_exist(self.config, "arp collect host enable"):
            self.existing["arp_collect_host"] = "enable"
        else:
            self.existing["arp_collect_host"] = "disable"

        if is_config_exist(self.config, "host collect protocol bgp"):
            self.existing["host_collect_protocol"] = "bgp"
        else:
            self.existing["host_collect_protocol"] = None

        if is_config_exist(self.config, "arp broadcast-suppress enable"):
            self.existing["arp_suppress"] = "enable"
        else:
            self.existing["arp_suppress"] = "disable"

    def get_end_state(self):
        """Fill self.end_state by re-reading the device configuration."""

        config = self.get_current_config()

        evn_bgp_exist = is_config_exist(config, "evn bgp")
        if evn_bgp_exist:
            self.end_state["evn_bgp"] = "enable"
        else:
            self.end_state["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(config, "server enable"):
                self.end_state["evn_server"] = "enable"
            else:
                self.end_state["evn_server"] = "disable"
            self.end_state["evn_source_ip"] = get_evn_srouce(config)
            self.end_state["evn_peer_ip"] = get_evn_peers(config)
            self.end_state[
                "evn_reflect_client"] = get_evn_reflect_client(config)

        # BUGFIX: same wrong-key issue as in get_existing(); see above.
        if is_config_exist(config, "arp collect host enable"):
            self.end_state["arp_collect_host"] = "enable"
        else:
            self.end_state["arp_collect_host"] = "disable"

        if is_config_exist(config, "host collect protocol bgp"):
            self.end_state["host_collect_protocol"] = "bgp"
        else:
            self.end_state["host_collect_protocol"] = None

        if is_config_exist(config, "arp broadcast-suppress enable"):
            self.end_state["arp_suppress"] = "enable"
        else:
            self.end_state["arp_suppress"] = "disable"

    def work(self):
        """Worker: validate, diff, apply, and report results."""

        self.check_params()
        self.config = self.get_current_config()
        self.get_existing()
        self.get_proposed()

        # deal present or absent
        if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
            self.config_evn_bgp()

        if self.vbdif_name and self.arp_collect_host:
            self.config_vbdif()

        if self.host_collect_protocol:
            self.config_host_collect_protocal()

        if self.bridge_domain_id and self.arp_suppress:
            self.config_bridge_domain()

        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the worker."""
    enable_disable = ['enable', 'disable']
    argument_spec = dict(
        evn_bgp=dict(required=False, type='str', choices=enable_disable),
        evn_source_ip=dict(required=False, type='str'),
        evn_peer_ip=dict(required=False, type='str'),
        evn_server=dict(required=False, type='str', choices=enable_disable),
        evn_reflect_client=dict(required=False, type='str',
                                choices=enable_disable),
        vbdif_name=dict(required=False, type='str'),
        arp_collect_host=dict(required=False, type='str',
                              choices=enable_disable),
        host_collect_protocol=dict(required=False, type='str',
                                   choices=['bgp', 'none']),
        bridge_domain_id=dict(required=False, type='str'),
        arp_suppress=dict(required=False, type='str',
                          choices=enable_disable),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
    )
    # Merge the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    VxlanArp(argument_spec).work()


if __name__ == '__main__':
    main()
---
title: State Management
---
# State Management
[MODES: framework, data]
<br/>
<br/>
State management in React typically involves maintaining a synchronized cache of server data on the client side. However, when using React Router as your framework, most of the traditional caching solutions become redundant because of how it inherently handles data synchronization.
## Understanding State Management in React
In a typical React context, when we refer to "state management", we're primarily discussing how we synchronize server state with the client. A more apt term could be "cache management" because the server is the source of truth and the client state is mostly functioning as a cache.
Popular caching solutions in React include:
- **Redux:** A predictable state container for JavaScript apps.
- **React Query:** Hooks for fetching, caching, and updating asynchronous data in React.
- **Apollo:** A comprehensive state management library for JavaScript that integrates with GraphQL.
In certain scenarios, using these libraries may be warranted. However, with React Router's unique server-focused approach, their utility becomes less prevalent. In fact, most React Router applications forgo them entirely.
## How React Router Simplifies State
React Router seamlessly bridges the gap between the backend and frontend via mechanisms like loaders, actions, and forms with automatic synchronization through revalidation. This offers developers the ability to directly use server state within components without managing a cache, the network communication, or data revalidation, making most client-side caching redundant.
Here's why using typical React state patterns might be an anti-pattern in React Router:
1. **Network-related State:** If your React state is managing anything related to the network—such as data from loaders, pending form submissions, or navigational states—it's likely that you're managing state that React Router already manages:
- **[`useNavigation`][use_navigation]**: This hook gives you access to `navigation.state`, `navigation.formData`, `navigation.location`, etc.
- **[`useFetcher`][use_fetcher]**: This facilitates interaction with `fetcher.state`, `fetcher.formData`, `fetcher.data` etc.
- **[`loaderData`][loader_data]**: Access the data for a route.
- **[`actionData`][action_data]**: Access the data from the latest action.
2. **Storing Data in React Router:** A lot of data that developers might be tempted to store in React state has a more natural home in React Router, such as:
- **URL Search Params:** Parameters within the URL that hold state.
- **[Cookies][cookies]:** Small pieces of data stored on the user's device.
- **[Server Sessions][sessions]:** Server-managed user sessions.
- **Server Caches:** Cached data on the server side for quicker retrieval.
3. **Performance Considerations:** At times, client state is leveraged to avoid redundant data fetching. With React Router, you can use the [`Cache-Control`][cache_control_header] headers within `loader`s, allowing you to tap into the browser's native cache. However, this approach has its limitations and should be used judiciously. It's usually more beneficial to optimize backend queries or implement a server cache. This is because such changes benefit all users and do away with the need for individual browser caches.
As a developer transitioning to React Router, it's essential to recognize and embrace its inherent efficiencies rather than applying traditional React patterns. React Router offers a streamlined solution to state management leading to less code, fresh data, and no state synchronization bugs.
## Examples
### Network Related State
For examples on using React Router's internal state to manage network related state, refer to [Pending UI][pending_ui].
### URL Search Params
Consider a UI that lets the user customize between list view or detail view. Your instinct might be to reach for React state:
```tsx bad lines=[2,6,9]
export function List() {
const [view, setView] = useState("list");
return (
<div>
<div>
<button onClick={() => setView("list")}>
View as List
</button>
<button onClick={() => setView("details")}>
View with Details
</button>
</div>
{view === "list" ? <ListView /> : <DetailView />}
</div>
);
}
```
Now consider you want the URL to update when the user changes the view. Note the state synchronization:
```tsx bad lines=[7,16,24]
import { useNavigate, useSearchParams } from "react-router";
export function List() {
const navigate = useNavigate();
const [searchParams] = useSearchParams();
const [view, setView] = useState(
searchParams.get("view") || "list",
);
return (
<div>
<div>
<button
onClick={() => {
setView("list");
navigate(`?view=list`);
}}
>
View as List
</button>
<button
onClick={() => {
setView("details");
navigate(`?view=details`);
}}
>
View with Details
</button>
</div>
{view === "list" ? <ListView /> : <DetailView />}
</div>
);
}
```
Instead of synchronizing state, you can simply read and set the state in the URL directly with boring old HTML forms:
```tsx good lines=[5,9-16]
import { Form, useSearchParams } from "react-router";
export function List() {
const [searchParams] = useSearchParams();
const view = searchParams.get("view") || "list";
return (
<div>
<Form>
<button name="view" value="list">
View as List
</button>
<button name="view" value="details">
View with Details
</button>
</Form>
{view === "list" ? <ListView /> : <DetailView />}
</div>
);
}
```
### Persistent UI State
Consider a UI that toggles a sidebar's visibility. We have three ways to handle the state:
1. React state
2. Browser local storage
3. Cookies
In this discussion, we'll break down the trade-offs associated with each method.
#### React State
React state provides a simple solution for temporary state storage.
**Pros**:
- **Simple**: Easy to implement and understand.
- **Encapsulated**: State is scoped to the component.
**Cons**:
- **Transient**: Doesn't survive page refreshes, returning to the page later, or unmounting and remounting the component.
**Implementation**:
```tsx
function Sidebar() {
const [isOpen, setIsOpen] = useState(false);
return (
<div>
<button onClick={() => setIsOpen((open) => !open)}>
{isOpen ? "Close" : "Open"}
</button>
<aside hidden={!isOpen}>
<Outlet />
</aside>
</div>
);
}
```
#### Local Storage
To persist state beyond the component lifecycle, browser local storage is a step-up. See our doc on [Client Data][client_data] for more advanced examples.
**Pros**:
- **Persistent**: Maintains state across page refreshes and component mounts/unmounts.
- **Encapsulated**: State is scoped to the component.
**Cons**:
- **Requires Synchronization**: React components must sync up with local storage to initialize and save the current state.
- **Server Rendering Limitation**: The [`window`][window_global] and [`localStorage`][local_storage_global] objects are not accessible during server-side rendering, so state must be initialized in the browser with an effect.
- **UI Flickering**: On initial page loads, the state in local storage may not match what was rendered by the server and the UI will flicker when JavaScript loads.
**Implementation**:
```tsx
function Sidebar() {
const [isOpen, setIsOpen] = useState(false);
// synchronize initially
useLayoutEffect(() => {
const isOpen = window.localStorage.getItem("sidebar");
setIsOpen(isOpen);
}, []);
// synchronize on change
useEffect(() => {
window.localStorage.setItem("sidebar", isOpen);
}, [isOpen]);
return (
<div>
<button onClick={() => setIsOpen((open) => !open)}>
{isOpen ? "Close" : "Open"}
</button>
<aside hidden={!isOpen}>
<Outlet />
</aside>
</div>
);
}
```
In this approach, state must be initialized within an effect. This is crucial to avoid complications during server-side rendering. Directly initializing the React state from `localStorage` will cause errors since `window.localStorage` is unavailable during server rendering.
```tsx bad lines=[4]
function Sidebar() {
const [isOpen, setIsOpen] = useState(
// error: window is not defined
window.localStorage.getItem("sidebar"),
);
// ...
}
```
By initializing the state within an effect, there's potential for a mismatch between the server-rendered state and the state stored in local storage. This discrepancy will lead to brief UI flickering shortly after the page renders and should be avoided.
#### Cookies
Cookies offer a comprehensive solution for this use case. However, this method introduces added preliminary setup before making the state accessible within the component.
**Pros**:
- **Server Rendering**: State is available on the server for rendering and even for server actions.
- **Single Source of Truth**: Eliminates state synchronization hassles.
- **Persistence**: Maintains state across page loads and component mounts/unmounts. State can even persist across devices if you switch to a database-backed session.
- **Progressive Enhancement**: Functions even before JavaScript loads.
**Cons**:
- **Boilerplate**: Requires more code because of the network.
- **Exposed**: The state is not encapsulated to a single component, other parts of the app must be aware of the cookie.
**Implementation**:
First we'll need to create a cookie object:
```tsx
import { createCookie } from "react-router";
export const prefs = createCookie("prefs");
```
Next we set up the server action and loader to read and write the cookie:
```tsx filename=app/routes/sidebar.tsx
import { data, Outlet } from "react-router";
import type { Route } from "./+types/sidebar";
import { prefs } from "./prefs-cookie";
// read the state from the cookie
export async function loader({
request,
}: Route.LoaderArgs) {
const cookieHeader = request.headers.get("Cookie");
const cookie = (await prefs.parse(cookieHeader)) || {};
return data({ sidebarIsOpen: cookie.sidebarIsOpen });
}
// write the state to the cookie
export async function action({
request,
}: Route.ActionArgs) {
const cookieHeader = request.headers.get("Cookie");
const cookie = (await prefs.parse(cookieHeader)) || {};
const formData = await request.formData();
const isOpen = formData.get("sidebar") === "open";
cookie.sidebarIsOpen = isOpen;
return data(isOpen, {
headers: {
"Set-Cookie": await prefs.serialize(cookie),
},
});
}
```
After the server code is set up, we can use the cookie state in our UI:
```tsx
function Sidebar({ loaderData }: Route.ComponentProps) {
const fetcher = useFetcher();
let { sidebarIsOpen } = loaderData;
// use optimistic UI to immediately change the UI state
if (fetcher.formData?.has("sidebar")) {
sidebarIsOpen =
fetcher.formData.get("sidebar") === "open";
}
return (
<div>
<fetcher.Form method="post">
<button
name="sidebar"
value={sidebarIsOpen ? "closed" : "open"}
>
{sidebarIsOpen ? "Close" : "Open"}
</button>
</fetcher.Form>
<aside hidden={!sidebarIsOpen}>
<Outlet />
</aside>
</div>
);
}
```
While this is certainly more code that touches more of the application to account for the network requests and responses, the UX is greatly improved. Additionally, state comes from a single source of truth without any state synchronization required.
In summary, each of the discussed methods offers a unique set of benefits and challenges:
- **React state**: Offers simple but transient state management.
- **Local Storage**: Provides persistence but with synchronization requirements and UI flickering.
- **Cookies**: Delivers robust, persistent state management at the cost of added boilerplate.
None of these are wrong, but if you want to persist the state across visits, cookies offer the best user experience.
### Form Validation and Action Data
Client-side validation can augment the user experience, but similar enhancements can be achieved by leaning more towards server-side processing and letting it handle the complexities.
The following example illustrates the inherent complexities of managing network state, coordinating state from the server, and implementing validation redundantly on both the client and server sides. It's just for illustration, so forgive any obvious bugs or problems you find.
```tsx bad lines=[2,11,27,38,63]
export function Signup() {
// A multitude of React State declarations
const [isSubmitting, setIsSubmitting] = useState(false);
const [userName, setUserName] = useState("");
const [userNameError, setUserNameError] = useState(null);
const [password, setPassword] = useState(null);
const [passwordError, setPasswordError] = useState("");
// Replicating server-side logic in the client
function validateForm() {
setUserNameError(null);
setPasswordError(null);
const errors = validateSignupForm(userName, password);
if (errors) {
if (errors.userName) {
setUserNameError(errors.userName);
}
if (errors.password) {
setPasswordError(errors.password);
}
}
return Boolean(errors);
}
// Manual network interaction handling
async function handleSubmit() {
if (validateForm()) {
      setIsSubmitting(true);
const res = await postJSON("/api/signup", {
userName,
password,
});
const json = await res.json();
setIsSubmitting(false);
// Server state synchronization to the client
if (json.errors) {
if (json.errors.userName) {
setUserNameError(json.errors.userName);
}
if (json.errors.password) {
setPasswordError(json.errors.password);
}
}
}
}
return (
<form
onSubmit={(event) => {
event.preventDefault();
handleSubmit();
}}
>
<p>
<input
type="text"
name="username"
value={userName}
          onChange={(event) => {
// Synchronizing form state for the fetch
setUserName(event.target.value);
}}
/>
{userNameError ? <i>{userNameError}</i> : null}
</p>
<p>
<input
type="password"
name="password"
onChange={(event) => {
// Synchronizing form state for the fetch
setPassword(event.target.value);
}}
/>
{passwordError ? <i>{passwordError}</i> : null}
</p>
<button disabled={isSubmitting} type="submit">
Sign Up
</button>
{isSubmitting ? <BusyIndicator /> : null}
</form>
);
}
```
The backend endpoint, `/api/signup`, also performs validation and sends error feedback. Note that some essential validation, like detecting duplicate usernames, can only be done server-side using information the client doesn't have access to.
```tsx bad
export async function signupHandler(request: Request) {
const errors = await validateSignupRequest(request);
if (errors) {
return { ok: false, errors: errors };
}
await signupUser(request);
return { ok: true, errors: null };
}
```
Now, let's contrast this with a React Router-based implementation. The action remains consistent, but the component is vastly simplified due to the direct utilization of server state via `actionData`, and leveraging the network state that React Router inherently manages.
```tsx filename=app/routes/signup.tsx good lines=[20-22]
import { useNavigation } from "react-router";
import type { Route } from "./+types/signup";
export async function action({
request,
}: Route.ActionArgs) {
const errors = await validateSignupRequest(request);
if (errors) {
return { ok: false, errors: errors };
}
await signupUser(request);
return { ok: true, errors: null };
}
export function Signup({
actionData,
}: Route.ComponentProps) {
const navigation = useNavigation();
const userNameError = actionData?.errors?.userName;
const passwordError = actionData?.errors?.password;
const isSubmitting = navigation.formAction === "/signup";
return (
<Form method="post">
<p>
<input type="text" name="username" />
{userNameError ? <i>{userNameError}</i> : null}
</p>
<p>
<input type="password" name="password" />
{passwordError ? <i>{passwordError}</i> : null}
</p>
<button disabled={isSubmitting} type="submit">
Sign Up
</button>
{isSubmitting ? <BusyIndicator /> : null}
</Form>
);
}
```
The extensive state management from our previous example is distilled into just three code lines. We eliminate the necessity for React state, change event listeners, submit handlers, and state management libraries for such network interactions.
Direct access to the server state is made possible through `actionData`, and network state through `useNavigation` (or `useFetcher`).
As a bonus party trick, the form is functional even before JavaScript loads (see [Progressive Enhancement][progressive_enhancement]). Instead of React Router managing the network operations, the default browser behaviors step in.
If you ever find yourself entangled in managing and synchronizing state for network operations, React Router likely offers a more elegant solution.
[use_navigation]: https://api.reactrouter.com/v7/functions/react-router.useNavigation
[use_fetcher]: https://api.reactrouter.com/v7/functions/react-router.useFetcher
[loader_data]: ../start/framework/data-loading
[action_data]: ../start/framework/actions
[cookies]: ./sessions-and-cookies#cookies
[sessions]: ./sessions-and-cookies#sessions
[cache_control_header]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
[pending_ui]: ../start/framework/pending-ui
[client_data]: ../how-to/client-data
[window_global]: https://developer.mozilla.org/en-US/docs/Web/API/Window/window
[local_storage_global]: https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage
[progressive_enhancement]: ./progressive-enhancement | unknown | github | https://github.com/remix-run/react-router | docs/explanation/state-management.md |
#ifdef USE_VULKAN_API
#include <ATen/native/quantized/PackedParams.h>
#include <ATen/native/vulkan/ops/Batchnorm.h>
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/Convolution.h>
#include <ATen/native/vulkan/ops/Gru.h>
#include <ATen/native/vulkan/ops/Layernorm.h>
#include <ATen/native/vulkan/ops/Lstm.h>
#include <ATen/native/vulkan/ops/Mm.h>
#include <ATen/native/vulkan/ops/QuantizedFunctions.h>
#include <ATen/native/vulkan/ops/Register.h>
#include <torch/custom_class.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
int register_vulkan_conv2d_packed_context() {
static auto register_vulkan_conv2d_context =
torch::selective_class_<Conv2dPackedContext>(
"vulkan", TORCH_SELECTIVE_CLASS("Conv2dPackedContext"))
.def_pickle(
// __getstate__
[](const c10::intrusive_ptr<Conv2dPackedContext>& context) {
// context is packed
return context->unpack();
},
// __setstate__
[](c10::impl::GenericList state) {
// state is unpacked
return c10::make_intrusive<Conv2dPackedContext>(
Conv2dPackedContext::pack(state));
});
return 0;
}
int register_vulkan_conv1d_packed_context() {
static auto register_vulkan_conv1d_context =
torch::selective_class_<Conv1dPackedContext>(
"vulkan", TORCH_SELECTIVE_CLASS("Conv1dPackedContext"))
.def_pickle(
// __getstate__
[](const c10::intrusive_ptr<Conv1dPackedContext>& context) {
// context is packed
return context->unpack();
},
// __setstate__
[](c10::impl::GenericList state) {
// state is unpacked
return c10::make_intrusive<Conv1dPackedContext>(
Conv1dPackedContext::pack(state));
});
return 0;
}
int register_vulkan_linear_packed_context() {
static auto register_vulkan_linear_context =
torch::selective_class_<LinearPackedContext>(
"vulkan", TORCH_SELECTIVE_CLASS("LinearPackedContext"))
.def_pickle(
// __getstate__
[](const c10::intrusive_ptr<LinearPackedContext>& context) {
// context is packed
return context->unpack();
},
// __setstate__
[](c10::impl::GenericList state) {
// state is unpacked
return c10::make_intrusive<LinearPackedContext>(
LinearPackedContext::pack(state));
});
return 0;
}
int register_vulkan_layernorm_packed_context() {
static auto register_vulkan_layernorm_context =
torch::selective_class_<LayernormPackedContext>(
"vulkan", TORCH_SELECTIVE_CLASS("LayernormPackedContext"))
.def_pickle(
// __getstate__
[](const c10::intrusive_ptr<LayernormPackedContext>& context) {
// context is packed
return context->unpack();
},
// __setstate__
[](c10::impl::GenericList state) {
// state is unpacked
return c10::make_intrusive<LayernormPackedContext>(
LayernormPackedContext::pack(state));
});
return 0;
}
namespace {
// Registers the "vulkan" custom-class library: the packed-context classes
// for batch norm, GRU and LSTM (with pickle hooks mirroring the
// register_vulkan_*_packed_context helpers above), plus the remaining
// packed contexts via those helpers, and the legacy Conv2dOpContext.
TORCH_LIBRARY(vulkan, m) {
  m.class_<BatchNormPackedContext>("BatchNormPackedContext")
      .def_pickle(
          // __getstate__
          [](const c10::intrusive_ptr<BatchNormPackedContext>& context) {
            // context is packed
            return context->unpack();
          },
          // __setstate__
          [](c10::impl::GenericList state) {
            // state is unpacked
            return c10::make_intrusive<BatchNormPackedContext>(
                BatchNormPackedContext::pack(state));
          });
  m.class_<GruPackedContext>("GruPackedContext")
      .def_pickle(
          // __getstate__
          [](const c10::intrusive_ptr<GruPackedContext>& context) {
            // context is packed
            return context->unpack();
          },
          // __setstate__
          [](c10::impl::GenericList state) {
            // state is unpacked
            return c10::make_intrusive<GruPackedContext>(
                GruPackedContext::pack(state));
          });
  m.class_<LstmPackedContext>("LstmPackedContext")
      .def_pickle(
          // __getstate__
          [](const c10::intrusive_ptr<LstmPackedContext>& context) {
            // context is packed
            return context->unpack();
          },
          // __setstate__
          [](c10::impl::GenericList state) {
            // state is unpacked
            return c10::make_intrusive<LstmPackedContext>(
                LstmPackedContext::pack(state));
          });
  // Conv2d/conv1d/linear/layernorm contexts are registered through the
  // free functions above so they can also be triggered independently.
  register_vulkan_conv2d_packed_context();
  register_vulkan_conv1d_packed_context();
  register_vulkan_linear_packed_context();
  register_vulkan_layernorm_packed_context();

  // To maintain backwards compatibility.
  m.class_<Conv2dOpContext>("Conv2dOpContext")
      .def_pickle(
          // __getstate__
          [](const c10::intrusive_ptr<Conv2dOpContext>& context) {
            return context->unpack();
          },
          // __setstate__
          [](Conv2dOpContext::State state) {
            // Re-run the prepack path with the unpacked constructor args.
            return std::apply(conv2d_clamp_prepack, std::move(state));
          });
}
// Declares the operator schemas of the "vulkan_prepack" namespace.
// Each op comes in a create_*_context / run_*_context pair: the create op
// runs on CPU and packs weights into a serializable context object, the
// run op executes on the Vulkan backend using that context. The schema
// strings are load-bearing and must match the kernel signatures exactly.
TORCH_LIBRARY(vulkan_prepack, m) {
  // --- conv2d (plus legacy conv2d_clamp_* aliases) ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_conv2d_context(Tensor W, Tensor? B, int[2] stride, "
      "int[2] padding, int[2] dilation, int groups, "
      "Scalar? output_min=None, Scalar? output_max=None) "
      "-> __torch__.torch.classes.vulkan.Conv2dPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA( // Backwards compatibility
      "vulkan_prepack::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, "
      "int[2] padding, int[2] dilation, int groups, "
      "Scalar? output_min=None, Scalar? output_max=None) "
      "-> __torch__.torch.classes.vulkan.Conv2dOpContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_conv2d_context(Tensor X, "
      "__torch__.torch.classes.vulkan.Conv2dPackedContext W_prepack) -> Tensor Y"));
  m.def(TORCH_SELECTIVE_SCHEMA( // Backwards compatibility
      "vulkan_prepack::conv2d_clamp_run(Tensor X, "
      "__torch__.torch.classes.vulkan.Conv2dOpContext W_prepack) -> Tensor Y"));
  // --- transposed conv2d ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_tconv2d_context(Tensor W, Tensor? B, int[2] stride, "
      "int[2] padding, int[2] output_padding, int[2] dilation, int groups, "
      "Scalar? output_min=None, Scalar? output_max=None) "
      "-> __torch__.torch.classes.vulkan.Conv2dPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_tconv2d_context(Tensor X, "
      "__torch__.torch.classes.vulkan.Conv2dPackedContext W_prepack) -> Tensor Y"));
  // --- quantized conv2d (run op additionally takes output scale/zero_point) ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_qconv2d_context(Tensor W, Tensor? B, "
      "int[2] stride, int[2] padding, int[2] dilation, int groups, "
      "Scalar? output_min=None, Scalar? output_max=None) "
      "-> __torch__.torch.classes.vulkan.Conv2dPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_qconv2d_context(Tensor X, float scale, int zero_point, "
      "__torch__.torch.classes.vulkan.Conv2dPackedContext vk_context) -> Tensor Y"));
  // --- conv1d ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_conv1d_context(Tensor W, Tensor? B, int[2] stride, "
      "int[2] padding, int[2] dilation, int groups) "
      "-> __torch__.torch.classes.vulkan.Conv1dPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_conv1d_context(Tensor X, "
      "__torch__.torch.classes.vulkan.Conv1dPackedContext W_prepack) -> Tensor Y"));
  // --- quantized transposed conv2d (reuses run_qconv2d_context to execute) ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_qtconv2d_context(Tensor W, Tensor? B, int[2] stride, "
      "int[2] padding, int[2] output_padding, int[2] dilation, int groups, "
      "Scalar? output_min=None, Scalar? output_max=None) "
      "-> __torch__.torch.classes.vulkan.Conv2dPackedContext"));
  // --- linear / quantized linear ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_linear_context(Tensor W, Tensor? B) "
      "-> __torch__.torch.classes.vulkan.LinearPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_linear_context(Tensor X, "
      "__torch__.torch.classes.vulkan.LinearPackedContext BW_prepack) -> Tensor Y"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_qlinear_context(Tensor X, float scale, int zero_point, "
      "__torch__.torch.classes.vulkan.LinearPackedContext vk_context) -> Tensor Y"));
  // --- layer norm ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_layernorm_context(Tensor? W, Tensor? B, float eps) "
      "-> __torch__.torch.classes.vulkan.LayernormPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_layernorm_context(Tensor X, SymInt[] normalized_shape, "
      "__torch__.torch.classes.vulkan.LayernormPackedContext BW_prepack) -> Tensor Y"));
  // --- GRU ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_gru_context(Tensor[] params_cpu, "
      "bool has_biases, "
      "int num_layers, "
      "float dropout, "
      "bool train, "
      "bool bidirectional, "
      "bool batch_first) "
      "-> __torch__.torch.classes.vulkan.GruPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_gru_context(Tensor input_vk, "
      "Tensor hx_vk, "
      "__torch__.torch.classes.vulkan.GruPackedContext G_prepack) -> (Tensor next_input, Tensor hidden_layer)"));
  // --- LSTM ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_lstm_context(Tensor[] params_cpu, "
      "bool has_biases, "
      "int num_layers, "
      "float dropout, "
      "bool train, "
      "bool bidirectional, "
      "bool batch_first) "
      "-> __torch__.torch.classes.vulkan.LstmPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_lstm_context(Tensor input_vk, "
      "Tensor hx_vk, "
      "Tensor cx_vk, "
      "__torch__.torch.classes.vulkan.LstmPackedContext L_prepack) -> (Tensor next_input, Tensor hidden_state, Tensor cell_state)"));
  // --- batch norm ---
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::create_batchnorm_context("
      "Tensor? weight_opt, "
      "Tensor? bias_opt, "
      "Tensor? running_mean_opt, "
      "Tensor? running_var_opt, "
      "bool training, "
      "float momentum, "
      "float eps, "
      "bool cudnn_enable) "
      "-> __torch__.torch.classes.vulkan.BatchNormPackedContext"));
  m.def(TORCH_SELECTIVE_SCHEMA(
      "vulkan_prepack::run_batchnorm_context("
      "Tensor input_vk, "
      "__torch__.torch.classes.vulkan.BatchNormPackedContext context) "
      "-> Tensor out"));
}
// Binds the create_*_context (weight-packing) kernels to the CPU dispatch
// key: packing consumes CPU tensors before the data is moved to Vulkan.
TORCH_LIBRARY_IMPL(vulkan_prepack, CPU, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_conv2d_context"),
      TORCH_FN(create_conv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::conv2d_clamp_prepack"),
      TORCH_FN(conv2d_clamp_prepack)); // Backwards compatibility
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_tconv2d_context"),
      TORCH_FN(create_tconv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_conv1d_context"),
      TORCH_FN(create_conv1d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_linear_context"),
      TORCH_FN(create_linear_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_layernorm_context"),
      TORCH_FN(create_layernorm_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_gru_context"),
      TORCH_FN(create_gru_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_lstm_context"),
      TORCH_FN(create_lstm_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_batchnorm_context"),
      TORCH_FN(create_batchnorm_context));
}
// Quantized conv packing kernels take quantized CPU tensors, so they are
// registered under the QuantizedCPU dispatch key.
TORCH_LIBRARY_IMPL(vulkan_prepack, QuantizedCPU, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_qconv2d_context"),
      TORCH_FN(create_qconv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::create_qtconv2d_context"),
      TORCH_FN(create_qtconv2d_context));
}
// Binds the run_*_context (execution) kernels to the Vulkan dispatch key:
// these operate on tensors already resident on the Vulkan backend.
TORCH_LIBRARY_IMPL(vulkan_prepack, Vulkan, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_conv2d_context"),
      TORCH_FN(run_conv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::conv2d_clamp_run"),
      TORCH_FN(conv2d_clamp_run)); // Backwards compatibility
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_tconv2d_context"),
      TORCH_FN(run_tconv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_qconv2d_context"),
      TORCH_FN(run_qconv2d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_conv1d_context"),
      TORCH_FN(run_conv1d_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_linear_context"),
      TORCH_FN(run_linear_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_layernorm_context"),
      TORCH_FN(run_layernorm_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_qlinear_context"),
      TORCH_FN(run_qlinear_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_gru_context"),
      TORCH_FN(run_gru_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_lstm_context"),
      TORCH_FN(run_lstm_context));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_prepack::run_batchnorm_context"),
      TORCH_FN(run_batchnorm_context));
}
// Schemas for quantized binary elementwise ops; each takes two quantized
// tensors plus the output quantization parameters (scale, zero_point).
TORCH_LIBRARY(vulkan_quantized, m) {
  m.def(
      TORCH_SELECTIVE_SCHEMA("vulkan_quantized::add(Tensor qa, "
                             "Tensor qb, "
                             "float scale, "
                             "int zero_point) -> Tensor qc"));
  m.def(
      TORCH_SELECTIVE_SCHEMA("vulkan_quantized::sub(Tensor qa, "
                             "Tensor qb, "
                             "float scale, "
                             "int zero_point)-> Tensor qc"));
  m.def(
      TORCH_SELECTIVE_SCHEMA("vulkan_quantized::mul(Tensor qa, "
                             "Tensor qb, "
                             "float scale, "
                             "int zero_point)-> Tensor qc"));
  m.def(
      TORCH_SELECTIVE_SCHEMA("vulkan_quantized::div(Tensor qa, "
                             "Tensor qb, "
                             "float scale, "
                             "int zero_point)-> Tensor qc"));
}
// Vulkan-backend kernels for the quantized elementwise ops declared above.
TORCH_LIBRARY_IMPL(vulkan_quantized, Vulkan, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_quantized::add"), TORCH_FN(quantized_add));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_quantized::sub"), TORCH_FN(quantized_sub));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_quantized::mul"), TORCH_FN(quantized_mul));
  m.impl(
      TORCH_SELECTIVE_NAME("vulkan_quantized::div"), TORCH_FN(quantized_div));
}
} // namespace
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */ | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/vulkan/ops/Register.cpp |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A detachable toolbox for showing console printouts from the Crazyflie
"""
__author__ = 'Bitcraze AB'
__all__ = ['ConsoleToolbox']
import sys, time
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import Qt, pyqtSlot, pyqtSignal
console_class = uic.loadUiType(sys.path[0] + "/cfclient/ui/toolboxes/consoleToolbox.ui")[0]
class ConsoleToolbox(QtGui.QWidget, console_class):
    """Console toolbox for showing printouts from the Crazyflie.

    Characters received over the Crazyflie console port are forwarded
    through the ``update`` Qt signal onto the GUI thread and appended to
    the console text widget.
    """

    # Emitted with each console string; connected to the text widget so
    # that callbacks arriving from the link thread are marshalled onto
    # the Qt GUI thread.
    update = pyqtSignal(str)

    def __init__(self, helper, *args):
        super(ConsoleToolbox, self).__init__(*args)
        self.setupUi(self)
        self.update.connect(self.console.insertPlainText)
        self.helper = helper
        # Capture the bound emit ONCE: attribute access on a signal can
        # yield a fresh bound object each time, so passing a freshly
        # fetched self.update.emit to remove_callback() is not guaranteed
        # to match the object previously given to add_callback(), which
        # would leave the callback registered after disable().
        self._console_update_cb = self.update.emit

    def getName(self):
        """Return the display name used for this toolbox."""
        return 'Console'

    def enable(self):
        """Start receiving console characters from the Crazyflie."""
        self.helper.cf.console.receivedChar.add_callback(
            self._console_update_cb)

    def disable(self):
        """Stop receiving console characters from the Crazyflie."""
        self.helper.cf.console.receivedChar.remove_callback(
            self._console_update_cb)

    def preferedDockArea(self):
        """Return the preferred Qt dock area for this toolbox.

        NOTE: the misspelled name is part of the toolbox interface used
        by the main UI and must not be renamed here.
        """
        return Qt.BottomDockWidgetArea
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2014 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
import Foundation
import RealmSwift
import Realm
/// Minimal Realm object with a single string column, used across the tests.
final class SwiftStringObject: Object {
    @objc dynamic var stringCol = ""

    // Convenience initializer so tests can create a populated instance inline.
    convenience init(stringCol: String) {
        self.init()
        self.stringCol = stringCol
    }
}

/// Same shape as SwiftStringObject, declared with the modern @Persisted syntax.
class ModernSwiftStringObject: Object {
    @Persisted var stringCol = ""
}

/// Projection exposing ModernSwiftStringObject.stringCol under the name `string`.
class ModernSwiftStringProjection: Projection<ModernSwiftStringObject> {
    @Projected(\ModernSwiftStringObject.stringCol) var string
}
/// Single-Bool-column object; conforms to Identifiable for SwiftUI tests.
class SwiftBoolObject: Object, Identifiable {
    @objc dynamic var boolCol = false
}

/// Single Int column.
class SwiftIntObject: Object {
    @objc dynamic var intCol = 0
}

/// Single Int8 column.
class SwiftInt8Object: Object {
    @objc dynamic var int8Col = 0
}

/// Single Int16 column.
class SwiftInt16Object: Object {
    @objc dynamic var int16Col = 0
}

/// Single Int32 column.
class SwiftInt32Object: Object {
    @objc dynamic var int32Col = 0
}

/// Single Int64 column.
class SwiftInt64Object: Object {
    @objc dynamic var int64Col = 0
}

/// Single Int64 column under a "long" name.
class SwiftLongObject: Object {
    @objc dynamic var longCol: Int64 = 0
}

/// Int-backed enum persistable in Realm; raw values are deliberately
/// non-contiguous (1 and 3) to exercise raw-value round-tripping.
@objc enum IntEnum: Int, RealmEnum, Codable {
    case value1 = 1
    case value2 = 3
}
/// Kitchen-sink object covering every supported property type: scalars,
/// string/binary/date, Decimal128/ObjectId/UUID, an enum, a link, an
/// AnyRealmValue, and List/MutableSet/Map collections.
class SwiftObject: Object {
    @objc dynamic var boolCol = false
    @objc dynamic var intCol = 123
    @objc dynamic var int8Col: Int8 = 123
    @objc dynamic var int16Col: Int16 = 123
    @objc dynamic var int32Col: Int32 = 123
    @objc dynamic var int64Col: Int64 = 123
    @objc dynamic var intEnumCol = IntEnum.value1
    @objc dynamic var floatCol = 1.23 as Float
    @objc dynamic var doubleCol = 12.3
    @objc dynamic var stringCol = "a"
    @objc dynamic var binaryCol = Data("a".utf8)
    @objc dynamic var dateCol = Date(timeIntervalSince1970: 1)
    @objc dynamic var decimalCol = Decimal128("123e4")
    @objc dynamic var objectIdCol = ObjectId("1234567890ab1234567890ab")
    @objc dynamic var objectCol: SwiftBoolObject? = SwiftBoolObject()
    @objc dynamic var uuidCol: UUID = UUID(uuidString: "137decc8-b300-4954-a233-f89909f4fd89")!
    let anyCol = RealmProperty<AnyRealmValue>()
    let arrayCol = List<SwiftBoolObject>()
    let setCol = MutableSet<SwiftBoolObject>()
    let mapCol = Map<String, SwiftBoolObject?>()

    /// Dictionary mirror of the property defaults above, used by tests to
    /// verify default-value initialization (collections and anyCol excluded).
    class func defaultValues() -> [String: Any] {
        return [
            "boolCol": false,
            "intCol": 123,
            "int8Col": 123 as Int8,
            "int16Col": 123 as Int16,
            "int32Col": 123 as Int32,
            "int64Col": 123 as Int64,
            "floatCol": 1.23 as Float,
            "doubleCol": 12.3,
            "stringCol": "a",
            "binaryCol": Data("a".utf8),
            "dateCol": Date(timeIntervalSince1970: 1),
            "decimalCol": Decimal128("123e4"),
            "objectIdCol": ObjectId("1234567890ab1234567890ab"),
            "objectCol": [false],
            "uuidCol": UUID(uuidString: "137decc8-b300-4954-a233-f89909f4fd89")!
        ]
    }
}
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// Optional variant of every supported property type, using the legacy
/// RealmOptional wrapper for optional numeric columns.
class SwiftOptionalObject: Object {
    @objc dynamic var optNSStringCol: NSString?
    @objc dynamic var optStringCol: String?
    @objc dynamic var optBinaryCol: Data?
    @objc dynamic var optDateCol: Date?
    @objc dynamic var optDecimalCol: Decimal128?
    @objc dynamic var optObjectIdCol: ObjectId?
    @objc dynamic var optUuidCol: UUID?
    let optIntCol = RealmOptional<Int>()
    let optInt8Col = RealmOptional<Int8>()
    let optInt16Col = RealmOptional<Int16>()
    let optInt32Col = RealmOptional<Int32>()
    let optInt64Col = RealmOptional<Int64>()
    let optFloatCol = RealmOptional<Float>()
    let optDoubleCol = RealmOptional<Double>()
    let optBoolCol = RealmOptional<Bool>()
    let optEnumCol = RealmOptional<IntEnum>()
    let otherIntCol = RealmProperty<Int?>()
    @objc dynamic var optObjectCol: SwiftBoolObject?
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// SwiftOptionalObject subclass adding an optional-Int primary key.
class SwiftOptionalPrimaryObject: SwiftOptionalObject {
    let id = RealmOptional<Int>()

    override class func primaryKey() -> String? { return "id" }
}
/// One List property per supported element type, in both non-optional and
/// optional flavors, for exercising typed-List behavior.
class SwiftListObject: Object {
    let int = List<Int>()
    let int8 = List<Int8>()
    let int16 = List<Int16>()
    let int32 = List<Int32>()
    let int64 = List<Int64>()
    let float = List<Float>()
    let double = List<Double>()
    let string = List<String>()
    let data = List<Data>()
    let date = List<Date>()
    let decimal = List<Decimal128>()
    let objectId = List<ObjectId>()
    let uuid = List<UUID>()
    let any = List<AnyRealmValue>()

    let intOpt = List<Int?>()
    let int8Opt = List<Int8?>()
    let int16Opt = List<Int16?>()
    let int32Opt = List<Int32?>()
    let int64Opt = List<Int64?>()
    let floatOpt = List<Float?>()
    let doubleOpt = List<Double?>()
    let stringOpt = List<String?>()
    let dataOpt = List<Data?>()
    let dateOpt = List<Date?>()
    let decimalOpt = List<Decimal128?>()
    let objectIdOpt = List<ObjectId?>()
    let uuidOpt = List<UUID?>()
}
/// MutableSet counterpart of SwiftListObject: one set per supported
/// element type, non-optional and optional.
class SwiftMutableSetObject: Object {
    let int = MutableSet<Int>()
    let int8 = MutableSet<Int8>()
    let int16 = MutableSet<Int16>()
    let int32 = MutableSet<Int32>()
    let int64 = MutableSet<Int64>()
    let float = MutableSet<Float>()
    let double = MutableSet<Double>()
    let string = MutableSet<String>()
    let data = MutableSet<Data>()
    let date = MutableSet<Date>()
    let decimal = MutableSet<Decimal128>()
    let objectId = MutableSet<ObjectId>()
    let uuid = MutableSet<UUID>()
    let any = MutableSet<AnyRealmValue>()

    let intOpt = MutableSet<Int?>()
    let int8Opt = MutableSet<Int8?>()
    let int16Opt = MutableSet<Int16?>()
    let int32Opt = MutableSet<Int32?>()
    let int64Opt = MutableSet<Int64?>()
    let floatOpt = MutableSet<Float?>()
    let doubleOpt = MutableSet<Double?>()
    let stringOpt = MutableSet<String?>()
    let dataOpt = MutableSet<Data?>()
    let dateOpt = MutableSet<Date?>()
    let decimalOpt = MutableSet<Decimal128?>()
    let objectIdOpt = MutableSet<ObjectId?>()
    let uuidOpt = MutableSet<UUID?>()
}
/// Map (string-keyed dictionary) counterpart of SwiftListObject; also
/// includes Bool values and an object-valued map, which Lists/Sets lack here.
class SwiftMapObject: Object {
    let int = Map<String, Int>()
    let int8 = Map<String, Int8>()
    let int16 = Map<String, Int16>()
    let int32 = Map<String, Int32>()
    let int64 = Map<String, Int64>()
    let float = Map<String, Float>()
    let double = Map<String, Double>()
    let bool = Map<String, Bool>()
    let string = Map<String, String>()
    let data = Map<String, Data>()
    let date = Map<String, Date>()
    let decimal = Map<String, Decimal128>()
    let objectId = Map<String, ObjectId>()
    let uuid = Map<String, UUID>()
    let object = Map<String, SwiftStringObject?>()
    let any = Map<String, AnyRealmValue>()

    let intOpt = Map<String, Int?>()
    let int8Opt = Map<String, Int8?>()
    let int16Opt = Map<String, Int16?>()
    let int32Opt = Map<String, Int32?>()
    let int64Opt = Map<String, Int64?>()
    let floatOpt = Map<String, Float?>()
    let doubleOpt = Map<String, Double?>()
    let boolOpt = Map<String, Bool?>()
    let stringOpt = Map<String, String?>()
    let dataOpt = Map<String, Data?>()
    let dateOpt = Map<String, Date?>()
    let decimalOpt = Map<String, Decimal128?>()
    let objectIdOpt = Map<String, ObjectId?>()
    let uuidOpt = Map<String, UUID?>()
}
/// Optional columns declared as implicitly-unwrapped optionals, to verify
/// that `T!` properties are treated the same as `T?` by the schema.
class SwiftImplicitlyUnwrappedOptionalObject: Object {
    @objc dynamic var optNSStringCol: NSString!
    @objc dynamic var optStringCol: String!
    @objc dynamic var optBinaryCol: Data!
    @objc dynamic var optDateCol: Date!
    @objc dynamic var optDecimalCol: Decimal128!
    @objc dynamic var optObjectIdCol: ObjectId!
    @objc dynamic var optObjectCol: SwiftBoolObject!
    @objc dynamic var optUuidCol: UUID!
}
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// Optional columns that all carry non-nil default values, plus a
/// defaultValues() mirror used to verify them.
class SwiftOptionalDefaultValuesObject: Object {
    @objc dynamic var optNSStringCol: NSString? = "A"
    @objc dynamic var optStringCol: String? = "B"
    @objc dynamic var optBinaryCol: Data? = Data("C".utf8)
    @objc dynamic var optDateCol: Date? = Date(timeIntervalSince1970: 10)
    @objc dynamic var optDecimalCol: Decimal128? = "123"
    @objc dynamic var optObjectIdCol: ObjectId? = ObjectId("1234567890ab1234567890ab")
    @objc dynamic var optUuidCol: UUID? = UUID(uuidString: "00000000-0000-0000-0000-000000000000")
    let optIntCol = RealmOptional<Int>(1)
    let optInt8Col = RealmOptional<Int8>(1)
    let optInt16Col = RealmOptional<Int16>(1)
    let optInt32Col = RealmOptional<Int32>(1)
    let optInt64Col = RealmOptional<Int64>(1)
    let optFloatCol = RealmOptional<Float>(2.2)
    let optDoubleCol = RealmOptional<Double>(3.3)
    let optBoolCol = RealmOptional<Bool>(true)
    @objc dynamic var optObjectCol: SwiftBoolObject? = SwiftBoolObject(value: [true])

    /// Dictionary mirror of the defaults above (optObjectCol excluded).
    class func defaultValues() -> [String: Any] {
        return [
            "optNSStringCol": "A",
            "optStringCol": "B",
            "optBinaryCol": Data("C".utf8),
            "optDateCol": Date(timeIntervalSince1970: 10),
            "optDecimalCol": Decimal128("123"),
            "optObjectIdCol": ObjectId("1234567890ab1234567890ab"),
            "optIntCol": 1,
            "optInt8Col": Int8(1),
            "optInt16Col": Int16(1),
            "optInt32Col": Int32(1),
            "optInt64Col": Int64(1),
            "optFloatCol": 2.2 as Float,
            "optDoubleCol": 3.3,
            "optBoolCol": true,
            "optUuidCol": UUID(uuidString: "00000000-0000-0000-0000-000000000000")!
        ]
    }
}
/// Object whose optional properties are all listed in ignoredProperties(),
/// leaving only `id` persisted; verifies ignored optionals keep defaults.
class SwiftOptionalIgnoredPropertiesObject: Object {
    @objc dynamic var id = 0

    @objc dynamic var optNSStringCol: NSString? = "A"
    @objc dynamic var optStringCol: String? = "B"
    @objc dynamic var optBinaryCol: Data? = Data("C".utf8)
    @objc dynamic var optDateCol: Date? = Date(timeIntervalSince1970: 10)
    @objc dynamic var optDecimalCol: Decimal128? = "123"
    @objc dynamic var optObjectIdCol: ObjectId? = ObjectId("1234567890ab1234567890ab")
    @objc dynamic var optObjectCol: SwiftBoolObject? = SwiftBoolObject(value: [true])

    // Everything except `id` is excluded from the Realm schema.
    override class func ignoredProperties() -> [String] {
        return [
            "optNSStringCol",
            "optStringCol",
            "optBinaryCol",
            "optDateCol",
            "optDecimalCol",
            "optObjectIdCol",
            "optObjectCol"
        ]
    }
}
/// Dog with an inverse relationship back to the owners that link to it.
class SwiftDogObject: Object {
    @objc dynamic var dogName = ""
    // Backlinks populated from SwiftOwnerObject.dog.
    let owners = LinkingObjects(fromType: SwiftOwnerObject.self, property: "dog")
}

/// Owner holding a forward link to a dog (defaulting to a fresh instance).
class SwiftOwnerObject: Object {
    @objc dynamic var name = ""
    @objc dynamic var dog: SwiftDogObject? = SwiftDogObject()
}

/// Numeric/date/bool columns plus a string list, used by aggregate
/// (min/max/sum/avg) query tests.
class SwiftAggregateObject: Object {
    @objc dynamic var intCol = 0
    @objc dynamic var int8Col: Int8 = 0
    @objc dynamic var int16Col: Int16 = 0
    @objc dynamic var int32Col: Int32 = 0
    @objc dynamic var int64Col: Int64 = 0
    @objc dynamic var floatCol = 0 as Float
    @objc dynamic var doubleCol = 0.0
    @objc dynamic var decimalCol = 0.0 as Decimal128
    @objc dynamic var boolCol = false
    @objc dynamic var dateCol = Date()
    @objc dynamic var trueCol = true
    let stringListCol = List<SwiftStringObject>()
}

/// Every fixed-width integer size in one object.
class SwiftAllIntSizesObject: Object {
    @objc dynamic var int8: Int8 = 0
    @objc dynamic var int16: Int16 = 0
    @objc dynamic var int32: Int32 = 0
    @objc dynamic var int64: Int64 = 0
}
/// Simple employee record used as the element type of company collections.
class SwiftEmployeeObject: Object {
    @objc dynamic var name = ""
    @objc dynamic var age = 0
    @objc dynamic var hired = false
}

/// Company holding employees in all three collection kinds (List,
/// MutableSet, Map) for cross-collection query tests.
class SwiftCompanyObject: Object {
    @objc dynamic var name = ""
    let employees = List<SwiftEmployeeObject>()
    let employeeSet = MutableSet<SwiftEmployeeObject>()
    let employeeMap = Map<String, SwiftEmployeeObject?>()
}

/// Named object with several object-typed List properties.
class SwiftArrayPropertyObject: Object {
    @objc dynamic var name = ""
    let array = List<SwiftStringObject>()
    let intArray = List<SwiftIntObject>()
    let swiftObjArray = List<SwiftObject>()
}

/// Named object with several object-typed MutableSet properties.
class SwiftMutableSetPropertyObject: Object {
    @objc dynamic var name = ""
    let set = MutableSet<SwiftStringObject>()
    let intSet = MutableSet<SwiftIntObject>()
    let swiftObjSet = MutableSet<SwiftObject>()
}

/// Named object with several object-typed Map properties.
class SwiftMapPropertyObject: Object {
    @objc dynamic var name = ""
    let map = Map<String, SwiftStringObject?>()
    let intMap = Map<String, SwiftIntObject?>()
    let swiftObjectMap = Map<String, SwiftObject?>()
    let dogMap = Map<String, SwiftDogObject?>()
}
/// Two levels of List nesting: a list of objects that each hold a list.
class SwiftDoubleListOfSwiftObject: Object {
    let array = List<SwiftListOfSwiftObject>()
}

/// Wrapper object holding a List of the kitchen-sink SwiftObject.
class SwiftListOfSwiftObject: Object {
    let array = List<SwiftObject>()
}

/// Wrapper object holding a MutableSet of SwiftObject.
class SwiftMutableSetOfSwiftObject: Object {
    let set = MutableSet<SwiftObject>()
}

/// Wrapper object holding a Map of SwiftObject.
class SwiftMapOfSwiftObject: Object {
    let map = Map<String, SwiftObject?>()
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// Map wrapper around the legacy SwiftOptionalObject.
class SwiftMapOfSwiftOptionalObject: Object {
    let map = Map<String, SwiftOptionalObject?>()
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// List wrapper around the legacy SwiftOptionalObject.
class SwiftListOfSwiftOptionalObject: Object {
    let array = List<SwiftOptionalObject>()
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
/// MutableSet wrapper around the legacy SwiftOptionalObject.
class SwiftMutableSetOfSwiftOptionalObject: Object {
    let set = MutableSet<SwiftOptionalObject>()
}

/// Subclass adding one more List to SwiftArrayPropertyObject, used to
/// test that inherited collection properties are picked up by the schema.
class SwiftArrayPropertySubclassObject: SwiftArrayPropertyObject {
    let boolArray = List<SwiftBoolObject>()
}
/// Object that links (singly and via a List) to a primary-keyed object,
/// and is itself primary-keyed on `pk`.
class SwiftLinkToPrimaryStringObject: Object {
    @objc dynamic var pk = ""
    @objc dynamic var object: SwiftPrimaryStringObject?
    let objects = List<SwiftPrimaryStringObject>()

    override class func primaryKey() -> String? {
        return "pk"
    }
}

/// Exercises non-ASCII identifiers and string values in the schema.
class SwiftUTF8Object: Object {
    // swiftlint:disable:next identifier_name
    @objc dynamic var 柱колоéнǢкƱаم👍 = "值значен™👍☞⎠‱௹♣︎☐▼❒∑⨌⧭иеمرحبا"
}
class SwiftIgnoredPropertiesObject: Object {
@objc dynamic var name = ""
@objc dynamic var age = 0
@objc dynamic var runtimeProperty: AnyObject?
@objc dynamic var runtimeDefaultProperty = "property"
@objc dynamic var readOnlyProperty: Int { return 0 }
override class func ignoredProperties() -> [String] {
return ["runtimeProperty", "runtimeDefaultProperty"]
}
}
class SwiftRecursiveObject: Object {
let objects = List<SwiftRecursiveObject>()
let objectSet = MutableSet<SwiftRecursiveObject>()
}
protocol SwiftPrimaryKeyObjectType {
associatedtype PrimaryKey
static func primaryKey() -> String?
}
// One test model per supported primary-key type: String and the fixed-width
// integers, each in a required and an optional (RealmOptional) variant.
class SwiftPrimaryStringObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var intCol = 0
    typealias PrimaryKey = String
    override class func primaryKey() -> String? {
        return "stringCol"
    }
}

class SwiftPrimaryOptionalStringObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol: String? = ""
    @objc dynamic var intCol = 0
    typealias PrimaryKey = String?
    override class func primaryKey() -> String? {
        return "stringCol"
    }
}

class SwiftPrimaryIntObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var intCol = 0
    typealias PrimaryKey = Int
    override class func primaryKey() -> String? {
        return "intCol"
    }
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryOptionalIntObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    let intCol = RealmOptional<Int>()
    typealias PrimaryKey = RealmOptional<Int>
    override class func primaryKey() -> String? {
        return "intCol"
    }
}

// NOTE(review): this class does not use RealmOptional, so the deprecation
// attribute looks unnecessary here — confirm before removing.
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryInt8Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var int8Col: Int8 = 0
    typealias PrimaryKey = Int8
    override class func primaryKey() -> String? {
        return "int8Col"
    }
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryOptionalInt8Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    let int8Col = RealmOptional<Int8>()
    typealias PrimaryKey = RealmOptional<Int8>
    override class func primaryKey() -> String? {
        return "int8Col"
    }
}

class SwiftPrimaryInt16Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var int16Col: Int16 = 0
    typealias PrimaryKey = Int16
    override class func primaryKey() -> String? {
        return "int16Col"
    }
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryOptionalInt16Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    let int16Col = RealmOptional<Int16>()
    typealias PrimaryKey = RealmOptional<Int16>
    override class func primaryKey() -> String? {
        return "int16Col"
    }
}

class SwiftPrimaryInt32Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var int32Col: Int32 = 0
    typealias PrimaryKey = Int32
    override class func primaryKey() -> String? {
        return "int32Col"
    }
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryOptionalInt32Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    let int32Col = RealmOptional<Int32>()
    typealias PrimaryKey = RealmOptional<Int32>
    override class func primaryKey() -> String? {
        return "int32Col"
    }
}

class SwiftPrimaryInt64Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    @objc dynamic var int64Col: Int64 = 0
    typealias PrimaryKey = Int64
    override class func primaryKey() -> String? {
        return "int64Col"
    }
}

@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftPrimaryOptionalInt64Object: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var stringCol = ""
    let int64Col = RealmOptional<Int64>()
    typealias PrimaryKey = RealmOptional<Int64>
    override class func primaryKey() -> String? {
        return "int64Col"
    }
}
// Test model whose primary key is a UUID column.
class SwiftPrimaryUUIDObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var uuidCol: UUID = UUID(uuidString: "85d4fbee-6ec6-47df-bfa1-615931903d7e")!
    @objc dynamic var stringCol = ""
    // Fixed: the primary key column is a UUID, not Int64 (the Int64 was a
    // copy-paste leftover from the integer primary-key models above).
    typealias PrimaryKey = UUID
    override class func primaryKey() -> String? {
        return "uuidCol"
    }
}
// Test model whose primary key is an ObjectId column.
class SwiftPrimaryObjectIdObject: Object, SwiftPrimaryKeyObjectType {
    @objc dynamic var objectIdCol: ObjectId = ObjectId.generate()
    @objc dynamic var intCol = 0
    // Fixed: the primary key column is an ObjectId, not Int64 (the Int64 was
    // a copy-paste leftover from the integer primary-key models above).
    typealias PrimaryKey = ObjectId
    override class func primaryKey() -> String? {
        return "objectIdCol"
    }
}
// Test model declaring indexes on every indexable required property type.
class SwiftIndexedPropertiesObject: Object {
    @objc dynamic var stringCol = ""
    @objc dynamic var intCol = 0
    @objc dynamic var int8Col: Int8 = 0
    @objc dynamic var int16Col: Int16 = 0
    @objc dynamic var int32Col: Int32 = 0
    @objc dynamic var int64Col: Int64 = 0
    @objc dynamic var boolCol = false
    @objc dynamic var dateCol = Date()
    @objc dynamic var uuidCol = UUID(uuidString: "85d4fbee-6ec6-47df-bfa1-615931903d7e")!
    // float/double/data are declared but deliberately left out of
    // indexedProperties() below — they are not indexable.
    @objc dynamic var floatCol: Float = 0.0
    @objc dynamic var doubleCol: Double = 0.0
    @objc dynamic var dataCol = Data()
    let anyCol = RealmProperty<AnyRealmValue>()
    override class func indexedProperties() -> [String] {
        return ["stringCol", "intCol", "int8Col", "int16Col",
                "int32Col", "int64Col", "boolCol", "dateCol", "anyCol", "uuidCol"]
    }
}

// Same as above but with optional column types.
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftIndexedOptionalPropertiesObject: Object {
    @objc dynamic var optionalStringCol: String? = ""
    let optionalIntCol = RealmOptional<Int>()
    let optionalInt8Col = RealmOptional<Int8>()
    let optionalInt16Col = RealmOptional<Int16>()
    let optionalInt32Col = RealmOptional<Int32>()
    let optionalInt64Col = RealmOptional<Int64>()
    let optionalBoolCol = RealmOptional<Bool>()
    @objc dynamic var optionalDateCol: Date? = Date()
    @objc dynamic var optionalUUIDCol: UUID? = UUID(uuidString: "85d4fbee-6ec6-47df-bfa1-615931903d7e")
    let optionalFloatCol = RealmOptional<Float>()
    let optionalDoubleCol = RealmOptional<Double>()
    @objc dynamic var optionalDataCol: Data? = Data()
    override class func indexedProperties() -> [String] {
        return ["optionalStringCol", "optionalIntCol", "optionalInt8Col", "optionalInt16Col",
                "optionalInt32Col", "optionalInt64Col", "optionalBoolCol", "optionalDateCol", "optionalUUIDCol"]
    }
}
// Test model with a custom designated initializer alongside the required
// parameterless one.
class SwiftCustomInitializerObject: Object {
    @objc dynamic var stringCol: String
    init(stringVal: String) {
        stringCol = stringVal
        super.init()
    }
    required override init() {
        stringCol = ""
        super.init()
    }
}

// Test model initialized through a convenience initializer only.
class SwiftConvenienceInitializerObject: Object {
    @objc dynamic var stringCol = ""
    convenience init(stringCol: String) {
        self.init()
        self.stringCol = stringCol
    }
}

// Test model declaring properties with Objective-C reference types rather
// than the Swift value-type equivalents.
class SwiftObjectiveCTypesObject: Object {
    @objc dynamic var stringCol: NSString?
    @objc dynamic var dateCol: NSDate?
    @objc dynamic var dataCol: NSData?
}
// Test model verifying that computed properties (with or without @objc
// dynamic) are not persisted; only the backing string column is stored.
class SwiftComputedPropertyNotIgnoredObject: Object {
    @objc dynamic var _urlBacking = ""
    // Dynamic; no ivar
    @objc dynamic var dynamicURL: URL? {
        get {
            return URL(string: _urlBacking)
        }
        set {
            _urlBacking = newValue?.absoluteString ?? ""
        }
    }
    // Non-dynamic; no ivar
    var url: URL? {
        get {
            return URL(string: _urlBacking)
        }
        set {
            _urlBacking = newValue?.absoluteString ?? ""
        }
    }
}

// Test model whose Objective-C name matches its Swift name explicitly.
@objc(SwiftObjcRenamedObject)
class SwiftObjcRenamedObject: Object {
    @objc dynamic var stringCol = ""
}

// Test model whose Objective-C name differs entirely from its Swift name.
@objc(SwiftObjcRenamedObjectWithTotallyDifferentName)
class SwiftObjcArbitrarilyRenamedObject: Object {
    @objc dynamic var boolCol = false
}

// Test model with a self-referential link and list (object graph cycles).
class SwiftCircleObject: Object {
    @objc dynamic var obj: SwiftCircleObject?
    let array = List<SwiftCircleObject>()
}
// Exists to serve as a superclass to `SwiftGenericPropsOrderingObject`
class SwiftGenericPropsOrderingParent: Object {
var implicitlyIgnoredComputedProperty: Int { return 0 }
let implicitlyIgnoredReadOnlyProperty: Int = 1
let parentFirstList = List<SwiftIntObject>()
let parentFirstSet = MutableSet<SwiftIntObject>()
@objc dynamic var parentFirstNumber = 0
func parentFunction() -> Int { return parentFirstNumber + 1 }
@objc dynamic var parentSecondNumber = 1
var parentComputedProp: String { return "hello world" }
}
// Used to verify that Swift properties (generic and otherwise) are detected properly and
// added to the schema in the correct order.
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftGenericPropsOrderingObject: SwiftGenericPropsOrderingParent {
func myFunction() -> Int { return firstNumber + secondNumber + thirdNumber }
@objc dynamic var dynamicComputed: Int { return 999 }
var firstIgnored = 999
@objc dynamic var dynamicIgnored = 999
@objc dynamic var firstNumber = 0 // Managed property
class func myClassFunction(x: Int, y: Int) -> Int { return x + y }
var secondIgnored = 999
lazy var lazyIgnored = 999
let firstArray = List<SwiftStringObject>() // Managed property
let firstSet = MutableSet<SwiftStringObject>() // Managed property
@objc dynamic var secondNumber = 0 // Managed property
var computedProp: String { return "\(firstNumber), \(secondNumber), and \(thirdNumber)" }
let secondArray = List<SwiftStringObject>() // Managed property
let secondSet = MutableSet<SwiftStringObject>() // Managed property
override class func ignoredProperties() -> [String] {
return ["firstIgnored", "dynamicIgnored", "secondIgnored", "thirdIgnored", "lazyIgnored", "dynamicLazyIgnored"]
}
let firstOptionalNumber = RealmOptional<Int>() // Managed property
var thirdIgnored = 999
@objc dynamic lazy var dynamicLazyIgnored = 999
let firstLinking = LinkingObjects(fromType: SwiftGenericPropsOrderingHelper.self, property: "first")
let secondLinking = LinkingObjects(fromType: SwiftGenericPropsOrderingHelper.self, property: "second")
@objc dynamic var thirdNumber = 0 // Managed property
let secondOptionalNumber = RealmOptional<Int>() // Managed property
}
// Only exists to allow linking object properties on `SwiftGenericPropsNotLastObject`.
@available(*, deprecated) // Silence deprecation warnings for RealmOptional
class SwiftGenericPropsOrderingHelper: Object {
@objc dynamic var first: SwiftGenericPropsOrderingObject?
@objc dynamic var second: SwiftGenericPropsOrderingObject?
}
// The four classes below test property renaming: two Swift classes mapping
// different Swift property names onto the same stored names ("prop 1"/
// "prop 2"), plus two link classes mapping onto the same stored link/array/
// set names, all sharing stored object names via _realmObjectName().
class SwiftRenamedProperties1: Object {
    @objc dynamic var propA = 0
    @objc dynamic var propB = ""
    let linking1 = LinkingObjects(fromType: LinkToSwiftRenamedProperties1.self, property: "linkA")
    let linking2 = LinkingObjects(fromType: LinkToSwiftRenamedProperties2.self, property: "linkD")
    override class func _realmObjectName() -> String { return "Swift Renamed Properties" }
    override class func propertiesMapping() -> [String: String] {
        return ["propA": "prop 1", "propB": "prop 2"]
    }
}

class SwiftRenamedProperties2: Object {
    @objc dynamic var propC = 0
    @objc dynamic var propD = ""
    let linking1 = LinkingObjects(fromType: LinkToSwiftRenamedProperties1.self, property: "linkA")
    let linking2 = LinkingObjects(fromType: LinkToSwiftRenamedProperties2.self, property: "linkD")
    override class func _realmObjectName() -> String { return "Swift Renamed Properties" }
    override class func propertiesMapping() -> [String: String] {
        return ["propC": "prop 1", "propD": "prop 2"]
    }
}

class LinkToSwiftRenamedProperties1: Object {
    @objc dynamic var linkA: SwiftRenamedProperties1?
    @objc dynamic var linkB: SwiftRenamedProperties2?
    let array1 = List<SwiftRenamedProperties1>()
    let set1 = MutableSet<SwiftRenamedProperties1>()
    override class func _realmObjectName() -> String { return "Link To Swift Renamed Properties" }
    override class func propertiesMapping() -> [String: String] {
        return ["linkA": "link 1", "linkB": "link 2", "array1": "array", "set1": "set"]
    }
}

class LinkToSwiftRenamedProperties2: Object {
    @objc dynamic var linkC: SwiftRenamedProperties1?
    @objc dynamic var linkD: SwiftRenamedProperties2?
    let array2 = List<SwiftRenamedProperties2>()
    let set2 = MutableSet<SwiftRenamedProperties2>()
    override class func _realmObjectName() -> String { return "Link To Swift Renamed Properties" }
    override class func propertiesMapping() -> [String: String] {
        return ["linkC": "link 1", "linkD": "link 2", "array2": "array", "set2": "set"]
    }
}
// Parent object owning embedded objects through a link, a List and a Map.
class EmbeddedParentObject: Object {
    @objc dynamic var object: EmbeddedTreeObject1?
    let array = List<EmbeddedTreeObject1>()
    let map = Map<String, EmbeddedTreeObject1?>()
}

// Parent object with a primary key that owns embedded objects.
class EmbeddedPrimaryParentObject: Object {
    @objc dynamic var pk: Int = 0
    @objc dynamic var object: EmbeddedTreeObject1?
    let array = List<EmbeddedTreeObject1>()
    override class func primaryKey() -> String? {
        return "pk"
    }
}

// Common interface for the three levels of the embedded-object tree below.
protocol EmbeddedTreeObject: EmbeddedObject {
    var value: Int { get set }
}

// Level 1 of a three-level embedded-object tree, with backlinks to its parent.
class EmbeddedTreeObject1: EmbeddedObject, EmbeddedTreeObject {
    @objc dynamic var value = 0
    @objc dynamic var child: EmbeddedTreeObject2?
    let children = List<EmbeddedTreeObject2>()
    let parent1 = LinkingObjects(fromType: EmbeddedParentObject.self, property: "object")
    let parent2 = LinkingObjects(fromType: EmbeddedParentObject.self, property: "array")
}

// Level 2 of the embedded-object tree.
class EmbeddedTreeObject2: EmbeddedObject, EmbeddedTreeObject {
    @objc dynamic var value = 0
    @objc dynamic var child: EmbeddedTreeObject3?
    let children = List<EmbeddedTreeObject3>()
    let parent3 = LinkingObjects(fromType: EmbeddedTreeObject1.self, property: "child")
    let parent4 = LinkingObjects(fromType: EmbeddedTreeObject1.self, property: "children")
}

// Level 3 (leaf) of the embedded-object tree.
class EmbeddedTreeObject3: EmbeddedObject, EmbeddedTreeObject {
    @objc dynamic var value = 0
    let parent3 = LinkingObjects(fromType: EmbeddedTreeObject2.self, property: "child")
    let parent4 = LinkingObjects(fromType: EmbeddedTreeObject2.self, property: "children")
}

// Test model with an embedded-object class nested inside the owning class,
// exposed to Objective-C under an explicit flattened name.
class ObjectWithNestedEmbeddedObject: Object {
    @objc dynamic var value = 0
    @objc dynamic var inner: NestedInnerClass?
    @objc(ObjectWithNestedEmbeddedObject_NestedInnerClass)
    class NestedInnerClass: EmbeddedObject {
        @objc dynamic var value = 0
    }
}
// Verifies that a file-private Object subclass (with an explicit Objective-C
// name) can still be part of the schema.
@objc(PrivateObjectSubclass)
private class PrivateObjectSubclass: Object {
    @objc dynamic var value = 0
}

// Source side of a link to a class whose only properties are computed.
class LinkToOnlyComputed: Object {
    @Persisted var value: Int = 0
    @Persisted var link: OnlyComputedProps?
}

// A class whose only property is a computed (linking-objects) one.
class OnlyComputedProps: Object {
    @Persisted(originProperty: "link") var backlinks: LinkingObjects<LinkToOnlyComputed>
}
// errorcheck
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that incorrect short declarations and redeclarations are detected.
// Does not compile.
package main
// Fixture functions returning one, two and three results respectively,
// used below to exercise multi-value short declarations.
func f1() int                          { return 1 }
func f2() (float32, int)               { return 1, 2 }
func f3() (float32, int, string)       { return 1, 2, "3" }
// main exercises every invalid short-declaration/redeclaration form; each
// `// ERROR` comment marks a line the compiler is required to reject (this
// file is consumed by the errorcheck harness and must not compile).
func main() {
	{
		// simple redeclaration
		i := f1()
		i := f1() // ERROR "redeclared|no new"
		_ = i
	}
	{
		// change of type for f
		i, f, s := f3()
		f, g, t := f3() // ERROR "redeclared|cannot assign|incompatible|cannot use"
		_, _, _, _, _ = i, f, s, g, t
	}
	{
		// change of type for i
		i, f, s := f3()
		j, i, t := f3() // ERROR "redeclared|cannot assign|incompatible|cannot use"
		_, _, _, _, _ = i, f, s, j, t
	}
	{
		// no new variables
		i, f, s := f3()
		i, f := f2() // ERROR "redeclared|no new"
		_, _, _ = i, f, s
	}
	{
		// multiline no new variables
		i := f1
		i := func() int { // ERROR "redeclared|no new|incompatible"
			return 0
		}
		_ = i
	}
	{
		// single redeclaration
		i, f, s := f3()
		i := 1 // ERROR "redeclared|no new|incompatible"
		_, _, _ = i, f, s
	}
	// double redeclaration
	{
		i, f, s := f3()
		i, f := f2() // ERROR "redeclared|no new"
		_, _, _ = i, f, s
	}
	{
		// triple redeclaration
		i, f, s := f3()
		i, f, s := f3() // ERROR "redeclared|no new"
		_, _, _ = i, f, s
	}
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import threading
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools.mail import plaintext2html
class mail_followers(osv.Model):
    """ mail_followers holds the data related to the follow mechanism inside
        OpenERP. Partners can choose to follow documents (records) of any kind
        that inherits from mail.thread. Following documents allow to receive
        notifications for new messages.
        A subscription is characterized by:
            :param: res_model: model of the followed objects
            :param: res_id: ID of resource (may be 0 for every objects)
    """
    _name = 'mail.followers'
    _rec_name = 'partner_id'
    _log_access = False
    _description = 'Document Followers'
    _columns = {
        'res_model': fields.char('Related Document Model',
                        required=True, select=1,
                        help='Model of the followed resource'),
        'res_id': fields.integer('Related Document ID', select=1,
                        help='Id of the followed resource'),
        'partner_id': fields.many2one('res.partner', string='Related Partner',
                        ondelete='cascade', required=True, select=1),
        'subtype_ids': fields.many2many('mail.message.subtype', string='Subtype',
            help="Message subtypes followed, meaning subtypes that will be pushed onto the user's Wall."),
    }

    #
    # Modifying followers change access rights to individual documents. As the
    # cache may contain accessible/inaccessible data, one has to refresh it.
    #
    def create(self, cr, uid, vals, context=None):
        """Create a follower record, then flush the ORM cache (follower
        changes alter per-document access rights)."""
        res = super(mail_followers, self).create(cr, uid, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        """Update follower records, then flush the ORM cache (see note above
        the CRUD overrides)."""
        res = super(mail_followers, self).write(cr, uid, ids, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        """Delete follower records, then flush the ORM cache (see note above
        the CRUD overrides)."""
        res = super(mail_followers, self).unlink(cr, uid, ids, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    # One subscription per (document, partner): enforced at the DB level.
    _sql_constraints = [('mail_followers_res_partner_res_model_id_uniq','unique(res_model,res_id,partner_id)','Error, a partner cannot follow twice the same object.')]
class mail_notification(osv.Model):
    """ Class holding notifications pushed to partners. Followers and partners
        added in 'contacts to notify' receive notifications. """
    _name = 'mail.notification'
    _rec_name = 'partner_id'
    _log_access = False
    _description = 'Notifications'
    _columns = {
        'partner_id': fields.many2one('res.partner', string='Contact',
                        ondelete='cascade', required=True, select=1),
        'is_read': fields.boolean('Read', select=1, oldname='read'),
        'starred': fields.boolean('Starred', select=1,
            help='Starred message that goes into the todo mailbox'),
        'message_id': fields.many2one('mail.message', string='Message',
                        ondelete='cascade', required=True, select=1),
    }
    _defaults = {
        'is_read': False,
        'starred': False,
    }

    def init(self, cr):
        """Create a composite index used by the notification queries, if it
        does not exist yet (idempotent: checked against pg_indexes)."""
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_notification_partner_id_read_starred_message_id',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX mail_notification_partner_id_read_starred_message_id ON mail_notification (partner_id, is_read, starred, message_id)')

    def get_partners_to_email(self, cr, uid, ids, message, context=None):
        """ Return the list of partners to notify, based on their preferences.

            :param browse_record message: mail.message to notify
            :param list partners_to_notify: optional list of partner ids restricting
                the notifications to process
        """
        notify_pids = []
        for notification in self.browse(cr, uid, ids, context=context):
            if notification.is_read:
                continue
            partner = notification.partner_id
            # Do not send to partners without email address defined
            if not partner.email:
                continue
            # Do not send to partners having same email address than the author (can cause loops or bounce effect due to messy database)
            if message.author_id and message.author_id.email == partner.email:
                continue
            # Partner does not want to receive any emails or is opt-out
            if partner.notify_email == 'none':
                continue
            notify_pids.append(partner.id)
        return notify_pids

    def get_signature_footer(self, cr, uid, user_id, res_model=None, res_id=None, context=None, user_signature=True):
        """ Format a standard footer for notification emails (such as pushed messages
            notification or invite emails).
            Format:
                <p>--<br />
                    Administrator
                </p>
                <div>
                    <small>Sent from <a ...>Your Company</a> using <a ...>OpenERP</a>.</small>
                </div>
        """
        footer = ""
        if not user_id:
            return footer

        # add user signature
        # NOTE: browsed as SUPERUSER_ID because the notified user may not have
        # read access to the author's res.users record.
        user = self.pool.get("res.users").browse(cr, SUPERUSER_ID, [user_id], context=context)[0]
        if user_signature:
            if user.signature:
                signature = user.signature
            else:
                signature = "--<br />%s" % user.name
            footer = tools.append_content_to_html(footer, signature, plaintext=False)

        # add company signature
        if user.company_id.website:
            website_url = ('http://%s' % user.company_id.website) if not user.company_id.website.lower().startswith(('http:', 'https:')) \
                else user.company_id.website
            company = "<a style='color:inherit' href='%s'>%s</a>" % (website_url, user.company_id.name)
        else:
            company = user.company_id.name
        sent_by = _('Sent by %(company)s using %(odoo)s')
        signature_company = '<br /><small>%s</small>' % (sent_by % {
            'company': company,
            'odoo': "<a style='color:inherit' href='https://www.odoo.com/'>Odoo</a>"
        })
        footer = tools.append_content_to_html(footer, signature_company, plaintext=False, container_tag='div')
        return footer

    def update_message_notification(self, cr, uid, ids, message_id, partner_ids, context=None):
        """Mark the existing notifications ``ids`` of ``message_id`` as unread
        and create missing notifications for the partners in ``partner_ids``
        that have none yet. Return the ids of the newly created notifications.
        """
        existing_pids = set()
        new_pids = set()
        new_notif_ids = []

        for notification in self.browse(cr, uid, ids, context=context):
            existing_pids.add(notification.partner_id.id)

        # update existing notifications
        self.write(cr, uid, ids, {'is_read': False}, context=context)

        # create new notifications
        new_pids = set(partner_ids) - existing_pids
        for new_pid in new_pids:
            new_notif_ids.append(self.create(cr, uid, {'message_id': message_id, 'partner_id': new_pid, 'is_read': False}, context=context))
        return new_notif_ids

    def _notify_email(self, cr, uid, ids, message_id, force_send=False, user_signature=True, context=None):
        """Build and (optionally) send the mail.mail records delivering
        ``message_id`` to the partners of the notifications ``ids``.
        Recipients are chunked (50 per outgoing mail.mail) and custom
        per-model email/recipient values are honored when the target model
        defines the corresponding hooks."""
        message = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)

        # compute partners
        email_pids = self.get_partners_to_email(cr, uid, ids, message, context=context)
        if not email_pids:
            return True

        # compute email body (signature, company data)
        body_html = message.body
        # add user signature except for mail groups, where users are usually adding their own signatures already
        user_id = message.author_id and message.author_id.user_ids and message.author_id.user_ids[0] and message.author_id.user_ids[0].id or None
        signature_company = self.get_signature_footer(cr, uid, user_id, res_model=message.model, res_id=message.res_id, context=context, user_signature=(user_signature and message.model != 'mail.group'))
        if signature_company:
            body_html = tools.append_content_to_html(body_html, signature_company, plaintext=False, container_tag='div')

        # compute email references
        references = message.parent_id.message_id if message.parent_id else False

        # custom values
        custom_values = dict()
        if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'):
            custom_values = self.pool[message.model].message_get_email_values(cr, uid, message.res_id, message, context=context)

        # create email values
        max_recipients = 50
        # NOTE: xrange — this module targets Python 2.
        chunks = [email_pids[x:x + max_recipients] for x in xrange(0, len(email_pids), max_recipients)]
        email_ids = []
        for chunk in chunks:
            if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_recipient_values'):
                recipient_values = self.pool[message.model].message_get_recipient_values(cr, uid, message.res_id, notif_message=message, recipient_ids=chunk, context=context)
            else:
                recipient_values = self.pool['mail.thread'].message_get_recipient_values(cr, uid, message.res_id, notif_message=message, recipient_ids=chunk, context=context)
            mail_values = {
                'mail_message_id': message.id,
                'auto_delete': (context or {}).get('mail_auto_delete', True),
                'mail_server_id': (context or {}).get('mail_server_id', False),
                'body_html': body_html,
                'references': references,
            }
            mail_values.update(custom_values)
            mail_values.update(recipient_values)
            email_ids.append(self.pool.get('mail.mail').create(cr, uid, mail_values, context=context))
        # NOTE:
        #   1. for more than 50 followers, use the queue system
        #   2. do not send emails immediately if the registry is not loaded,
        #      to prevent sending email during a simple update of the database
        #      using the command-line.
        if force_send and len(chunks) < 2 and \
                (not self.pool._init or
                 getattr(threading.currentThread(), 'testing', False)):
            self.pool.get('mail.mail').send(cr, uid, email_ids, context=context)
        return True

    def _notify(self, cr, uid, message_id, partners_to_notify=None, context=None,
                force_send=False, user_signature=True):
        """ Send by email the notification depending on the user preferences

            :param list partners_to_notify: optional list of partner ids restricting
                the notifications to process
            :param bool force_send: if True, the generated mail.mail is
                immediately sent after being created, as if the scheduler
                was executed for this message only.
            :param bool user_signature: if True, the generated mail.mail body is
                the body of the related mail.message with the author's signature
        """
        notif_ids = self.search(cr, SUPERUSER_ID, [('message_id', '=', message_id), ('partner_id', 'in', partners_to_notify)], context=context)

        # update or create notifications
        new_notif_ids = self.update_message_notification(cr, SUPERUSER_ID, notif_ids, message_id, partners_to_notify, context=context)

        # mail_notify_noemail (do not send email) or no partner_ids: do not send, return
        if context and context.get('mail_notify_noemail'):
            return True

        # browse as SUPERUSER_ID because of access to res_partner not necessarily allowed
        self._notify_email(cr, SUPERUSER_ID, new_notif_ids, message_id, force_send, user_signature, context=context)
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that downgrades wrapper-generation failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Surface the failure as a warning, then tell pybindgen the error was
        # handled so generation continues with the remaining wrappers.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True


pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns.stats bindings."""
    return Module('ns.stats', cpp_namespace='::ns3')
def register_types(module):
    """Register with pybindgen every C++ type exposed by the ns.stats module.

    NOTE: registration order matters — a class must have been added before it
    can be looked up through root_module[...] (e.g. as a parent or outer class).
    """
    root_module = module.get_root()
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback [class]
    module.add_class('DataOutputCallback', allow_subclassing=True)
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## file-helper.h (module 'stats'): ns3::FileHelper [class]
    module.add_class('FileHelper')
    ## gnuplot.h (module 'stats'): ns3::Gnuplot [class]
    module.add_class('Gnuplot')
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection [class]
    module.add_class('GnuplotCollection')
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset [class]
    module.add_class('GnuplotDataset')
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper [class]
    module.add_class('GnuplotHelper')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary [class]
    module.add_class('StatisticalSummary', allow_subclassing=True)
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## traced-value.h (module 'core'): ns3::TracedValue<bool> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['bool'])
    ## traced-value.h (module 'core'): ns3::TracedValue<double> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned char'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned int'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned short'])
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset [class]
    module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::Style [enumeration]
    module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
    module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dFunction [class]
    module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dDataset [class]
    module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dFunction [class]
    module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## boolean.h (module 'core'): ns3::BooleanChecker [class]
    module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## boolean.h (module 'core'): ns3::BooleanValue [class]
    module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-calculator.h (module 'stats'): ns3::DataCalculator [class]
    module.add_class('DataCalculator', parent=root_module['ns3::Object'])
    ## data-collection-object.h (module 'stats'): ns3::DataCollectionObject [class]
    module.add_class('DataCollectionObject', parent=root_module['ns3::Object'])
    ## data-collector.h (module 'stats'): ns3::DataCollector [class]
    module.add_class('DataCollector', parent=root_module['ns3::Object'])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputInterface [class]
    module.add_class('DataOutputInterface', parent=root_module['ns3::Object'])
    ## double.h (module 'core'): ns3::DoubleValue [class]
    module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## enum.h (module 'core'): ns3::EnumChecker [class]
    module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## enum.h (module 'core'): ns3::EnumValue [class]
    module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## file-aggregator.h (module 'stats'): ns3::FileAggregator [class]
    module.add_class('FileAggregator', parent=root_module['ns3::DataCollectionObject'])
    ## file-aggregator.h (module 'stats'): ns3::FileAggregator::FileType [enumeration]
    module.add_enum('FileType', ['FORMATTED', 'SPACE_SEPARATED', 'COMMA_SEPARATED', 'TAB_SEPARATED'], outer_class=root_module['ns3::FileAggregator'])
    ## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator [class]
    module.add_class('GnuplotAggregator', parent=root_module['ns3::DataCollectionObject'])
    ## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator::KeyLocation [enumeration]
    module.add_enum('KeyLocation', ['NO_KEY', 'KEY_INSIDE', 'KEY_ABOVE', 'KEY_BELOW'], outer_class=root_module['ns3::GnuplotAggregator'])
    ## integer.h (module 'core'): ns3::IntegerValue [class]
    module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## basic-data-calculators.h (module 'stats'): ns3::MinMaxAvgTotalCalculator<double> [class]
    module.add_class('MinMaxAvgTotalCalculator', template_parameters=['double'], parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## omnet-data-output.h (module 'stats'): ns3::OmnetDataOutput [class]
    module.add_class('OmnetDataOutput', parent=root_module['ns3::DataOutputInterface'])
    ## probe.h (module 'stats'): ns3::Probe [class]
    module.add_class('Probe', parent=root_module['ns3::DataCollectionObject'])
    ## sqlite-data-output.h (module 'stats'): ns3::SqliteDataOutput [class]
    module.add_class('SqliteDataOutput', parent=root_module['ns3::DataOutputInterface'])
    ## time-data-calculators.h (module 'stats'): ns3::TimeMinMaxAvgTotalCalculator [class]
    module.add_class('TimeMinMaxAvgTotalCalculator', parent=root_module['ns3::DataCalculator'])
    ## time-series-adaptor.h (module 'stats'): ns3::TimeSeriesAdaptor [class]
    module.add_class('TimeSeriesAdaptor', parent=root_module['ns3::DataCollectionObject'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## uinteger-16-probe.h (module 'stats'): ns3::Uinteger16Probe [class]
    module.add_class('Uinteger16Probe', parent=root_module['ns3::Probe'])
    ## uinteger-32-probe.h (module 'stats'): ns3::Uinteger32Probe [class]
    module.add_class('Uinteger32Probe', parent=root_module['ns3::Probe'])
    ## uinteger-8-probe.h (module 'stats'): ns3::Uinteger8Probe [class]
    module.add_class('Uinteger8Probe', parent=root_module['ns3::Probe'])
    ## uinteger.h (module 'core'): ns3::UintegerValue [class]
    module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## boolean-probe.h (module 'stats'): ns3::BooleanProbe [class]
    module.add_class('BooleanProbe', parent=root_module['ns3::Probe'])
    ## double-probe.h (module 'stats'): ns3::DoubleProbe [class]
    module.add_class('DoubleProbe', parent=root_module['ns3::Probe'])
    # Typedef aliases for the C++ container typedefs used in wrapped signatures.
    typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::DataCalculator >, std::allocator< ns3::Ptr< ns3::DataCalculator > > >', u'ns3::DataCalculatorList')
    typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::DataCalculator >, std::allocator< ns3::Ptr< ns3::DataCalculator > > >*', u'ns3::DataCalculatorList*')
    typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::DataCalculator >, std::allocator< ns3::Ptr< ns3::DataCalculator > > >&', u'ns3::DataCalculatorList&')
    typehandlers.add_type_alias(u'std::list< std::pair< std::string, std::string >, std::allocator< std::pair< std::string, std::string > > >', u'ns3::MetadataList')
    typehandlers.add_type_alias(u'std::list< std::pair< std::string, std::string >, std::allocator< std::pair< std::string, std::string > > >*', u'ns3::MetadataList*')
    typehandlers.add_type_alias(u'std::list< std::pair< std::string, std::string >, std::allocator< std::pair< std::string, std::string > > >&', u'ns3::MetadataList&')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace internal
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none are exported here)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register types for the ns3::Hash namespace and its nested Function namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Aliases for the raw hash-function pointer typedefs.
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash-function classes in ns3::Hash::Function.

    All four classes (hash-fnv.h / hash-function.h / hash-murmur3.h, module
    'core') derive from ns3::Hash::Implementation.
    """
    root_module = module.get_root()
    implementation = root_module['ns3::Hash::Implementation']
    for class_name in ('Fnv1a', 'Hash32', 'Hash64', 'Murmur3'):
        module.add_class(class_name, import_from_module='ns.core', parent=implementation)
def register_types_ns3_internal(module):
    """Register types for the ns3::internal namespace (none are exported here)."""
    root_module = module.get_root()
def register_methods(root_module):
    """Register the methods of every wrapped class, one helper per class.

    Dispatches to the per-class register_Ns3*_methods helpers; each helper
    receives the class wrapper previously created by register_types().
    """
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataOutputCallback_methods(root_module, root_module['ns3::DataOutputCallback'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3FileHelper_methods(root_module, root_module['ns3::FileHelper'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3GnuplotHelper_methods(root_module, root_module['ns3::GnuplotHelper'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3StatisticalSummary_methods(root_module, root_module['ns3::StatisticalSummary'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Bool_methods(root_module, root_module['ns3::TracedValue< bool >'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TracedValue__Unsigned_char_methods(root_module, root_module['ns3::TracedValue< unsigned char >'])
    register_Ns3TracedValue__Unsigned_int_methods(root_module, root_module['ns3::TracedValue< unsigned int >'])
    register_Ns3TracedValue__Unsigned_short_methods(root_module, root_module['ns3::TracedValue< unsigned short >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataCalculator_methods(root_module, root_module['ns3::DataCalculator'])
    register_Ns3DataCollectionObject_methods(root_module, root_module['ns3::DataCollectionObject'])
    register_Ns3DataCollector_methods(root_module, root_module['ns3::DataCollector'])
    register_Ns3DataOutputInterface_methods(root_module, root_module['ns3::DataOutputInterface'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FileAggregator_methods(root_module, root_module['ns3::FileAggregator'])
    register_Ns3GnuplotAggregator_methods(root_module, root_module['ns3::GnuplotAggregator'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, root_module['ns3::MinMaxAvgTotalCalculator< double >'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OmnetDataOutput_methods(root_module, root_module['ns3::OmnetDataOutput'])
    register_Ns3Probe_methods(root_module, root_module['ns3::Probe'])
    register_Ns3SqliteDataOutput_methods(root_module, root_module['ns3::SqliteDataOutput'])
    register_Ns3TimeMinMaxAvgTotalCalculator_methods(root_module, root_module['ns3::TimeMinMaxAvgTotalCalculator'])
    register_Ns3TimeSeriesAdaptor_methods(root_module, root_module['ns3::TimeSeriesAdaptor'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Uinteger16Probe_methods(root_module, root_module['ns3::Uinteger16Probe'])
    register_Ns3Uinteger32Probe_methods(root_module, root_module['ns3::Uinteger32Probe'])
    register_Ns3Uinteger8Probe_methods(root_module, root_module['ns3::Uinteger8Probe'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3BooleanProbe_methods(root_module, root_module['ns3::BooleanProbe'])
    register_Ns3DoubleProbe_methods(root_module, root_module['ns3::DoubleProbe'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeConstructionList."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and public data members of
    ns3::AttributeConstructionList::Item (attribute-construction-list.h,
    module 'core')."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Public (mutable) data members of the struct.
    member_types = (
        ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
        ('name', 'std::string'),
        ('value', 'ns3::Ptr< ns3::AttributeValue >'),
    )
    for member_name, cpp_type in member_types:
        cls.add_instance_attribute(member_name, cpp_type, is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register ns3::CallbackBase (callback.h, module 'core'): public
    constructors, the GetImpl() accessor, and two protected members."""
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    # Protected constructor taking the callback implementation pointer.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    # Static, protected name-demangling helper.
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3DataOutputCallback_methods(root_module, cls):
    """Register ns3::DataOutputCallback (data-output-interface.h, module 'stats')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
    # Pure-virtual OutputSingleton(key, variable, val) overloads — one per
    # supported value type, registered in the generator's original order.
    for value_type in ('int', 'uint32_t', 'double', 'std::string', 'ns3::Time'):
        cls.add_method('OutputSingleton',
                       'void',
                       [param('std::string', 'key'), param('std::string', 'variable'), param(value_type, 'val')],
                       is_pure_virtual=True, is_virtual=True)
    # Pure-virtual statistic output taking a StatisticalSummary pointer.
    cls.add_method('OutputStatistic',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register ns3::EventId (event-id.h, module 'core')."""
    # Equality and inequality comparison operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # Copy, default, and full (impl, ts, context, uid) constructors.
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    # Cancel() is the only non-const member function.
    cls.add_method('Cancel', 'void', [])
    # Zero-argument const accessors/queries, in the generator's order.
    for method_name, return_type in (('GetContext', 'uint32_t'),
                                     ('GetTs', 'uint64_t'),
                                     ('GetUid', 'uint32_t'),
                                     ('IsExpired', 'bool'),
                                     ('IsRunning', 'bool'),
                                     ('PeekEventImpl', 'ns3::EventImpl *')):
        cls.add_method(method_name, return_type, [], is_const=True)
    return
def register_Ns3FileHelper_methods(root_module, cls):
    """Register ns3::FileHelper (file-helper.h, module 'stats')."""
    # Copy, default, and configuring constructors; the last defaults the
    # aggregator file type to SPACE_SEPARATED.
    cls.add_constructor([param('ns3::FileHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'),
                         param('ns3::FileAggregator::FileType', 'fileType',
                               default_value='::ns3::FileAggregator::SPACE_SEPARATED')])
    cls.add_method('AddAggregator', 'void',
                   [param('std::string const &', 'aggregatorName'),
                    param('std::string const &', 'outputFileName'),
                    param('bool', 'onlyOneAggregator')])
    cls.add_method('AddProbe', 'void',
                   [param('std::string const &', 'typeId'),
                    param('std::string const &', 'probeName'),
                    param('std::string const &', 'path')])
    cls.add_method('AddTimeSeriesAdaptor', 'void',
                   [param('std::string const &', 'adaptorName')])
    cls.add_method('ConfigureFile', 'void',
                   [param('std::string const &', 'outputFileNameWithoutExtension'),
                    param('ns3::FileAggregator::FileType', 'fileType',
                          default_value='::ns3::FileAggregator::SPACE_SEPARATED')])
    cls.add_method('GetAggregatorMultiple', 'ns3::Ptr< ns3::FileAggregator >',
                   [param('std::string const &', 'aggregatorName'),
                    param('std::string const &', 'outputFileName')])
    cls.add_method('GetAggregatorSingle', 'ns3::Ptr< ns3::FileAggregator >', [])
    cls.add_method('GetProbe', 'ns3::Ptr< ns3::Probe >',
                   [param('std::string', 'probeName')],
                   is_const=True)
    # The SetNdFormat setters all share one signature.  Registration order
    # matches the generated output, which sorts alphabetically ('10d' before '1d').
    for dims in ('10', '1', '2', '3', '4', '5', '6', '7', '8', '9'):
        cls.add_method('Set' + dims + 'dFormat', 'void',
                       [param('std::string const &', 'format')])
    cls.add_method('SetHeading', 'void',
                   [param('std::string const &', 'heading')])
    cls.add_method('WriteProbe', 'void',
                   [param('std::string const &', 'typeId'),
                    param('std::string const &', 'path'),
                    param('std::string const &', 'probeTraceSource')])
    return
def register_Ns3Gnuplot_methods(root_module, cls):
    """Register ns3::Gnuplot (gnuplot.h, module 'stats')."""
    # Copy constructor, then (outputFilename, title) constructor with
    # empty-string defaults for both arguments.
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'),
                         param('std::string const &', 'title', default_value='""')])
    cls.add_method('AddDataset', 'void', [param('ns3::GnuplotDataset const &', 'dataset')])
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    # Static helper returning a terminal name for a given filename.
    cls.add_method('DetectTerminal', 'std::string',
                   [param('std::string const &', 'filename')],
                   is_static=True)
    # GenerateOutput overloads: single stream, or split control/data streams.
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')])
    cls.add_method('GenerateOutput', 'void',
                   [param('std::ostream &', 'osControl'),
                    param('std::ostream &', 'osData'),
                    param('std::string', 'dataFileName')])
    cls.add_method('SetDataFileDatasetIndex', 'void', [param('unsigned int', 'index')])
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetLegend', 'void',
                   [param('std::string const &', 'xLegend'),
                    param('std::string const &', 'yLegend')])
    cls.add_method('SetOutputFilename', 'void', [param('std::string const &', 'outputFilename')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    return
def register_Ns3GnuplotCollection_methods(root_module, cls):
    """Register ns3::GnuplotCollection (gnuplot.h, module 'stats')."""
    # Copy constructor and the (outputFilename) constructor.
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    cls.add_method('AddPlot', 'void', [param('ns3::Gnuplot const &', 'plot')])
    # GenerateOutput overloads: single combined stream, or control/data split.
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')])
    cls.add_method('GenerateOutput', 'void',
                   [param('std::ostream &', 'osControl'),
                    param('std::ostream &', 'osData'),
                    param('std::string', 'dataFileName')])
    # GetPlot returns a mutable reference to the plot at the given index.
    cls.add_method('GetPlot', 'ns3::Gnuplot &', [param('unsigned int', 'id')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    return
def register_Ns3GnuplotDataset_methods(root_module, cls):
    """Register ns3::GnuplotDataset (gnuplot.h, module 'stats')."""
    # Public copy constructor.
    cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
    # Static default-extra setter, then the per-instance setters.
    cls.add_method('SetDefaultExtra', 'void',
                   [param('std::string const &', 'extra')],
                   is_static=True)
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    # Protected constructor taking the Data implementation pointer.
    cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')],
                        visibility='protected')
    return
def register_Ns3GnuplotHelper_methods(root_module, cls):
    """Register ns3::GnuplotHelper (gnuplot-helper.h, module 'stats')."""
    # Copy, default, and configuring constructors; terminalType defaults to "png".
    cls.add_constructor([param('ns3::GnuplotHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'),
                         param('std::string const &', 'title'),
                         param('std::string const &', 'xLegend'),
                         param('std::string const &', 'yLegend'),
                         param('std::string const &', 'terminalType', default_value='"png"')])
    cls.add_method('AddProbe', 'void',
                   [param('std::string const &', 'typeId'),
                    param('std::string const &', 'probeName'),
                    param('std::string const &', 'path')])
    cls.add_method('AddTimeSeriesAdaptor', 'void',
                   [param('std::string const &', 'adaptorName')])
    cls.add_method('ConfigurePlot', 'void',
                   [param('std::string const &', 'outputFileNameWithoutExtension'),
                    param('std::string const &', 'title'),
                    param('std::string const &', 'xLegend'),
                    param('std::string const &', 'yLegend'),
                    param('std::string const &', 'terminalType', default_value='"png"')])
    cls.add_method('GetAggregator', 'ns3::Ptr< ns3::GnuplotAggregator >', [])
    cls.add_method('GetProbe', 'ns3::Ptr< ns3::Probe >',
                   [param('std::string', 'probeName')],
                   is_const=True)
    # PlotProbe's key location defaults to KEY_INSIDE.
    cls.add_method('PlotProbe', 'void',
                   [param('std::string const &', 'typeId'),
                    param('std::string const &', 'path'),
                    param('std::string const &', 'probeTraceSource'),
                    param('std::string const &', 'title'),
                    param('ns3::GnuplotAggregator::KeyLocation', 'keyLocation',
                          default_value='::ns3::GnuplotAggregator::KEY_INSIDE')])
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register ns3::Hasher (hash.h, module 'core')."""
    # Copy, default, and implementation-pointer constructors.
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    # GetHash32/GetHash64 each have a (buffer, size) and a (std::string) overload.
    for bits in ('32', '64'):
        return_type = 'uint' + bits + '_t'
        cls.add_method('GetHash' + bits, return_type,
                       [param('char const *', 'buffer'), param('size_t const', 'size')])
        cls.add_method('GetHash' + bits, return_type,
                       [param('std::string const', 's')])
    # clear() returns the Hasher by reference (chaining style).
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register ns3::ObjectBase (object-base.h, module 'core'): constructors,
    attribute get/set methods, trace connect/disconnect methods, and two
    protected construction hooks."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    # Protected construction hook applying an AttributeConstructionList.
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    # Protected virtual hook invoked after construction completes.
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::ObjectDeleter (object.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # Static Delete(Object*); this type is the deleter parameter of the
    # SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation below.
    cls.add_method('Delete', 'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register ns3::ObjectFactory (object-factory.h, module 'core')."""
    # operator<< support for printing the factory.
    cls.add_output_stream_operator()
    # Copy, default, and (typeId string) constructors.
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId object, C string, and std::string — in the
    # generator's original order.
    for tid_type in ('ns3::TypeId', 'char const *', 'std::string'):
        cls.add_method('SetTypeId', 'void', [param(tid_type, 'tid')])
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register the SimpleRefCount<Object, ObjectBase, ObjectDeleter>
    template instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # Static Cleanup() member function.
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register ns3::Simulator (simulator.h, module 'core').

    Apart from the copy constructor, every registration here is a static
    member function, so they are driven from one table.
    """
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # (name, return type, parameter list) for each static member function,
    # in the order the bindings generator emits them.
    static_methods = (
        ('Cancel', 'void', [param('ns3::EventId const &', 'id')]),
        ('Destroy', 'void', []),
        ('GetContext', 'uint32_t', []),
        ('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')]),
        ('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', []),
        ('GetMaximumSimulationTime', 'ns3::Time', []),
        ('GetSystemId', 'uint32_t', []),
        ('IsExpired', 'bool', [param('ns3::EventId const &', 'id')]),
        ('IsFinished', 'bool', []),
        ('Now', 'ns3::Time', []),
        ('Remove', 'void', [param('ns3::EventId const &', 'id')]),
        ('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')]),
        ('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')]),
        # Stop() has a no-argument overload and a (Time) overload.
        ('Stop', 'void', []),
        ('Stop', 'void', [param('ns3::Time const &', 'time')]),
    )
    for method_name, return_type, params in static_methods:
        cls.add_method(method_name, return_type, params, is_static=True)
    return
def register_Ns3StatisticalSummary_methods(root_module, cls):
    """Register ns3::StatisticalSummary (data-calculator.h, module 'stats')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::StatisticalSummary const &', 'arg0')])
    # getCount() is the only getter returning 'long int'; the rest return double.
    cls.add_method('getCount', 'long int', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Pure-virtual const getters returning double, in the generator's order.
    for getter in ('getMax', 'getMean', 'getMin', 'getSqrSum',
                   'getStddev', 'getSum', 'getVariance'):
        cls.add_method(getter, 'double', [],
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register ns3::TimeWithUnit (nstime.h, module 'core').

    The type only exposes stream output, a copy constructor, and a
    (Time, Time::Unit) constructor.
    """
    # Printable via operator<<.
    cls.add_output_stream_operator()
    # Copy constructor.
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    # TimeWithUnit(Time const time, Time::Unit const unit).
    cls.add_constructor([param('ns3::Time const', 'time'),
                         param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TracedValue__Bool_methods(root_module, cls):
    """Register ns3::TracedValue<bool> (traced-value.h, module 'core')."""
    # Constructors: default, copy, and wrap-a-value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< bool > const &', 'o')])
    cls.add_constructor([param('bool const &', 'v')])
    # Trace-sink hookup: Connect/Disconnect take a context path while the
    # *WithoutContext variants take only the callback.
    for method_name, with_path in (('Connect', True),
                                   ('ConnectWithoutContext', False),
                                   ('Disconnect', True),
                                   ('DisconnectWithoutContext', False)):
        args = [param('ns3::CallbackBase const &', 'cb')]
        if with_path:
            args.append(param('std::string', 'path'))
        cls.add_method(method_name, 'void', args)
    # Value accessors.
    cls.add_method('Get', 'bool', [], is_const=True)
    cls.add_method('Set', 'void', [param('bool const &', 'v')])
    return
def register_Ns3TracedValue__Double_methods(root_module, cls):
    """Register ns3::TracedValue<double> (traced-value.h, module 'core')."""
    # Default, copy, and value-wrapping constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
    cls.add_constructor([param('double const &', 'v')])
    # Trace-sink (dis)connection; the *WithoutContext forms omit the
    # context-path argument.
    for method_name, with_path in (('Connect', True),
                                   ('ConnectWithoutContext', False),
                                   ('Disconnect', True),
                                   ('DisconnectWithoutContext', False)):
        args = [param('ns3::CallbackBase const &', 'cb')]
        if with_path:
            args.append(param('std::string', 'path'))
        cls.add_method(method_name, 'void', args)
    # Read/write access to the wrapped value.
    cls.add_method('Get', 'double', [], is_const=True)
    cls.add_method('Set', 'void', [param('double const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_char_methods(root_module, cls):
    """Register ns3::TracedValue<unsigned char> (traced-value.h, module 'core')."""
    # Default, copy, and value-wrapping constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'o')])
    cls.add_constructor([param('unsigned char const &', 'v')])
    # Trace-sink hookup quartet; only Connect/Disconnect carry a path.
    for method_name, with_path in (('Connect', True),
                                   ('ConnectWithoutContext', False),
                                   ('Disconnect', True),
                                   ('DisconnectWithoutContext', False)):
        args = [param('ns3::CallbackBase const &', 'cb')]
        if with_path:
            args.append(param('std::string', 'path'))
        cls.add_method(method_name, 'void', args)
    # Read/write access to the wrapped value.
    cls.add_method('Get', 'unsigned char', [], is_const=True)
    cls.add_method('Set', 'void', [param('unsigned char const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_int_methods(root_module, cls):
    """Register ns3::TracedValue<unsigned int> (traced-value.h, module 'core')."""
    # Default, copy, and value-wrapping constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'o')])
    cls.add_constructor([param('unsigned int const &', 'v')])
    # Trace-sink hookup quartet; only Connect/Disconnect carry a path.
    for method_name, with_path in (('Connect', True),
                                   ('ConnectWithoutContext', False),
                                   ('Disconnect', True),
                                   ('DisconnectWithoutContext', False)):
        args = [param('ns3::CallbackBase const &', 'cb')]
        if with_path:
            args.append(param('std::string', 'path'))
        cls.add_method(method_name, 'void', args)
    # Read/write access to the wrapped value.
    cls.add_method('Get', 'unsigned int', [], is_const=True)
    cls.add_method('Set', 'void', [param('unsigned int const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_short_methods(root_module, cls):
    """Register ns3::TracedValue<unsigned short> (traced-value.h, module 'core')."""
    # Default, copy, and value-wrapping constructors.  Note the generated
    # spelling 'short unsigned int' for the value type.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< unsigned short > const &', 'o')])
    cls.add_constructor([param('short unsigned int const &', 'v')])
    # Trace-sink hookup quartet; only Connect/Disconnect carry a path.
    for method_name, with_path in (('Connect', True),
                                   ('ConnectWithoutContext', False),
                                   ('Disconnect', True),
                                   ('DisconnectWithoutContext', False)):
        args = [param('ns3::CallbackBase const &', 'cb')]
        if with_path:
            args.append(param('std::string', 'path'))
        cls.add_method(method_name, 'void', args)
    # Read/write access to the wrapped value.
    cls.add_method('Get', 'short unsigned int', [], is_const=True)
    cls.add_method('Set', 'void', [param('short unsigned int const &', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register ns3::TypeId (type-id.h, module 'core').

    Binds comparison/stream operators, constructors, attribute and
    trace-source registration methods, introspection accessors, and the
    static registry lookup helpers.  Generated registration order is
    preserved exactly.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register ns3::TypeId::AttributeInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # All public data members of the info struct, exposed as writable
    # instance attributes.  Order matches the generated registration.
    for attr_name, attr_type in (
            ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('flags', 'uint32_t'),
            ('help', 'std::string'),
            ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('name', 'std::string'),
            ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register ns3::TypeId::TraceSourceInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public data members, exposed as writable instance attributes.
    for attr_name, attr_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('help', 'std::string'),
            ('name', 'std::string')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty (empty.h, module 'core'): a tag type exposing
    only default and copy constructors."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register ns3::int64x64_t (int64x64-double.h, module 'core').

    Binds the arithmetic/comparison operator set, the numeric-conversion
    constructors, and the hi/lo accessors.  Registration order follows the
    original generated sequence exactly.
    """
    def _self_param():
        # Fresh rhs parameter for each operator registration.
        return param('ns3::int64x64_t const &', u'right')
    me = root_module['ns3::int64x64_t']
    # Binary arithmetic against another int64x64_t: *, +, -, then unary
    # negation, then /.
    for op in ('*', '+', '-'):
        cls.add_binary_numeric_operator(op, me, me, _self_param())
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', me, me, _self_param())
    # First comparison group.
    for op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(op)
    # Compound-assignment forms.
    for op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(op, _self_param())
    cls.add_output_stream_operator()
    # Second comparison group.
    for op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(op)
    # Constructors: default, one per numeric conversion type, the
    # (hi, lo) pair, and copy.
    cls.add_constructor([])
    for numeric_type in ('double', 'long double', 'int', 'long int',
                         'long long int', 'unsigned int', 'long unsigned int',
                         'long long unsigned int'):
        cls.add_constructor([param(numeric_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors for the double value and the raw high/low words.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    # Static reciprocal helper and in-place multiply-by-inverse.
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')],
                   is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    # Which backend implements the type (double in this build).
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const',
                             is_const=True)
    return
def register_Ns3Gnuplot2dDataset_methods(root_module, cls):
    """Register ns3::Gnuplot2dDataset (gnuplot.h, module 'stats')."""
    # Copy constructor and titled constructor (default title "Untitled").
    cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title',
                               default_value='"Untitled"')])
    # Add(x, y) plus overloads carrying one- and two-sided error deltas.
    for extra_names in ((), ('errorDelta',), ('xErrorDelta', 'yErrorDelta')):
        args = [param('double', 'x'), param('double', 'y')]
        args.extend(param('double', extra) for extra in extra_names)
        cls.add_method('Add', 'void', args)
    cls.add_method('AddEmptyLine', 'void', [])
    # Static class-wide defaults, then the per-dataset overrides.
    cls.add_method('SetDefaultErrorBars', 'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')],
                   is_static=True)
    cls.add_method('SetDefaultStyle', 'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')],
                   is_static=True)
    cls.add_method('SetErrorBars', 'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    cls.add_method('SetStyle', 'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')])
    return
def register_Ns3Gnuplot2dFunction_methods(root_module, cls):
    """Register ns3::Gnuplot2dFunction (gnuplot.h, module 'stats')."""
    # Copy constructor, then (title, function) constructor with defaults.
    cls.add_constructor([param('ns3::Gnuplot2dFunction const &', 'arg0')])
    cls.add_constructor([
        param('std::string const &', 'title', default_value='"Untitled"'),
        param('std::string const &', 'function', default_value='""'),
    ])
    # Replace the gnuplot function expression.
    cls.add_method('SetFunction', 'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Gnuplot3dDataset_methods(root_module, cls):
    """Register ns3::Gnuplot3dDataset (gnuplot.h, module 'stats')."""
    # Copy constructor and titled constructor (default title "Untitled").
    cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title',
                               default_value='"Untitled"')])
    # Point insertion and blank-line separator.
    cls.add_method('Add', 'void',
                   [param('double', 'x'), param('double', 'y'),
                    param('double', 'z')])
    cls.add_method('AddEmptyLine', 'void', [])
    # Static class-wide default style, then the per-dataset override.
    cls.add_method('SetDefaultStyle', 'void',
                   [param('std::string const &', 'style')],
                   is_static=True)
    cls.add_method('SetStyle', 'void',
                   [param('std::string const &', 'style')])
    return
def register_Ns3Gnuplot3dFunction_methods(root_module, cls):
    """Register ns3::Gnuplot3dFunction (gnuplot.h, module 'stats')."""
    # Copy constructor, then (title, function) constructor with defaults.
    cls.add_constructor([param('ns3::Gnuplot3dFunction const &', 'arg0')])
    cls.add_constructor([
        param('std::string const &', 'title', default_value='"Untitled"'),
        param('std::string const &', 'function', default_value='""'),
    ])
    # Replace the gnuplot function expression.
    cls.add_method('SetFunction', 'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object (object.h, module 'core').

    Binds the public lifecycle/aggregation API first, then the protected
    copy constructor and protected virtual hooks, matching the original
    registration order.
    """
    # Public default constructor and aggregation/lifecycle methods.
    cls.add_constructor([])
    cls.add_method('AggregateObject', 'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator',
                   [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    # Protected copy constructor.
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    # Protected virtual lifecycle hooks, all void/no-arg.
    for hook in ('DoDispose', 'DoInitialize', 'NotifyNewAggregate'):
        cls.add_method(hook, 'void', [],
                       visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Object::AggregateIterator.

    Auto-generated by PyBindGen/modulegen; each call mirrors one declaration
    from object.h (see the `##` provenance comments). Do not hand-edit.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeAccessor, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeChecker, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeValue, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<CallbackImplBase, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<EventImpl, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<Hash::Implementation, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<TraceSourceAccessor, ...> (ref-count base).

    Auto-generated by PyBindGen/modulegen for one template instantiation of
    ns3::SimpleRefCount; exposes constructors and the static Cleanup() hook.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (simulation time value type).

    Auto-generated by PyBindGen/modulegen from nstime.h. Registers, in order:
    arithmetic/comparison operators, the full set of numeric and string
    constructors, and the unit-conversion / query member functions. The
    registration order is part of the generated output — do not hand-edit.
    """
    # Arithmetic operators: Time * int64, Time +/- Time, Time / int64.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As',
                   'ns3::TimeWithUnit',
                   [param('ns3::Time::Unit const', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit',
                   'bool',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::TraceSourceAccessor (abstract base).

    Auto-generated by PyBindGen/modulegen from trace-source-accessor.h. All
    four Connect/Disconnect members are pure virtual; transfer_ownership=False
    keeps the ObjectBase* argument owned by the caller.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeAccessor (abstract base).

    Auto-generated by PyBindGen/modulegen from attribute.h; Get/Set and the
    Has* queries are pure virtual in C++.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeChecker (abstract base).

    Auto-generated by PyBindGen/modulegen from attribute.h; Check/Copy/Create
    and the type-information queries are pure virtual, CreateValidValue is a
    plain const member.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeValue (abstract base).

    Auto-generated by PyBindGen/modulegen from attribute.h; Copy and the
    string (de)serialization hooks are pure virtual.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    """Register Python bindings for ns3::BooleanChecker (constructors only).

    Auto-generated by PyBindGen/modulegen from boolean.h.
    """
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return
def register_Ns3BooleanValue_methods(root_module, cls):
    """Register Python bindings for ns3::BooleanValue (bool attribute holder).

    Auto-generated by PyBindGen/modulegen from boolean.h; overrides the
    AttributeValue virtuals plus typed Get/Set accessors.
    """
    cls.add_output_stream_operator()
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
    cls.add_constructor([param('bool', 'value')])
    ## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
    cls.add_method('Get',
                   'bool',
                   [],
                   is_const=True)
    ## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('bool', 'value')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackChecker (constructors only).

    Auto-generated by PyBindGen/modulegen from callback.h.
    """
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImplBase.

    Auto-generated by PyBindGen/modulegen from callback.h; IsEqual is pure
    virtual in C++.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackValue (callback attribute holder).

    Auto-generated by PyBindGen/modulegen from callback.h; overrides the
    AttributeValue virtuals and adds a typed Set(CallbackBase).
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DataCalculator_methods(root_module, cls):
    """Register Python bindings for ns3::DataCalculator (stats collection base).

    Auto-generated by PyBindGen/modulegen from data-calculator.h; Output is
    pure virtual, Start/Stop are virtual, DoDispose is protected/virtual.
    """
    ## data-calculator.h (module 'stats'): ns3::DataCalculator::DataCalculator(ns3::DataCalculator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataCalculator const &', 'arg0')])
    ## data-calculator.h (module 'stats'): ns3::DataCalculator::DataCalculator() [constructor]
    cls.add_constructor([])
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::Disable() [member function]
    cls.add_method('Disable',
                   'void',
                   [])
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [])
    ## data-calculator.h (module 'stats'): std::string ns3::DataCalculator::GetContext() const [member function]
    cls.add_method('GetContext',
                   'std::string',
                   [],
                   is_const=True)
    ## data-calculator.h (module 'stats'): bool ns3::DataCalculator::GetEnabled() const [member function]
    cls.add_method('GetEnabled',
                   'bool',
                   [],
                   is_const=True)
    ## data-calculator.h (module 'stats'): std::string ns3::DataCalculator::GetKey() const [member function]
    cls.add_method('GetKey',
                   'std::string',
                   [],
                   is_const=True)
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::Output(ns3::DataOutputCallback & callback) const [member function]
    cls.add_method('Output',
                   'void',
                   [param('ns3::DataOutputCallback &', 'callback')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::SetContext(std::string const context) [member function]
    cls.add_method('SetContext',
                   'void',
                   [param('std::string const', 'context')])
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::SetKey(std::string const key) [member function]
    cls.add_method('SetKey',
                   'void',
                   [param('std::string const', 'key')])
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::Start(ns3::Time const & startTime) [member function]
    cls.add_method('Start',
                   'void',
                   [param('ns3::Time const &', 'startTime')],
                   is_virtual=True)
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::Stop(ns3::Time const & stopTime) [member function]
    cls.add_method('Stop',
                   'void',
                   [param('ns3::Time const &', 'stopTime')],
                   is_virtual=True)
    ## data-calculator.h (module 'stats'): void ns3::DataCalculator::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3DataCollectionObject_methods(root_module, cls):
    """Register Python bindings for ns3::DataCollectionObject.

    Auto-generated by PyBindGen/modulegen from data-collection-object.h;
    exposes enable/disable state, a name property pair, and GetTypeId.
    """
    ## data-collection-object.h (module 'stats'): ns3::DataCollectionObject::DataCollectionObject(ns3::DataCollectionObject const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataCollectionObject const &', 'arg0')])
    ## data-collection-object.h (module 'stats'): ns3::DataCollectionObject::DataCollectionObject() [constructor]
    cls.add_constructor([])
    ## data-collection-object.h (module 'stats'): void ns3::DataCollectionObject::Disable() [member function]
    cls.add_method('Disable',
                   'void',
                   [])
    ## data-collection-object.h (module 'stats'): void ns3::DataCollectionObject::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [])
    ## data-collection-object.h (module 'stats'): std::string ns3::DataCollectionObject::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## data-collection-object.h (module 'stats'): static ns3::TypeId ns3::DataCollectionObject::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## data-collection-object.h (module 'stats'): bool ns3::DataCollectionObject::IsEnabled() const [member function]
    cls.add_method('IsEnabled',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## data-collection-object.h (module 'stats'): void ns3::DataCollectionObject::SetName(std::string name) [member function]
    cls.add_method('SetName',
                   'void',
                   [param('std::string', 'name')])
    return
def register_Ns3DataCollector_methods(root_module, cls):
    """Register the Python bindings for ns3::DataCollector (stats module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::DataCollector const &', 'arg0')])
    cls.add_constructor([])
    # void AddDataCalculator(Ptr<DataCalculator> datac)
    cls.add_method('AddDataCalculator', 'void',
                   [param('ns3::Ptr< ns3::DataCalculator >', 'datac')])
    # AddMetadata(key, value) overloads: string, double, and uint32_t values.
    for value_type in ('std::string', 'double', 'uint32_t'):
        cls.add_method('AddMetadata', 'void',
                       [param('std::string', 'key'), param(value_type, 'value')])
    # Iterator access over the registered data calculators.
    for it_method in ('DataCalculatorBegin', 'DataCalculatorEnd'):
        cls.add_method(it_method,
                       'std::_List_iterator< ns3::Ptr< ns3::DataCalculator > >',
                       [])
    # void DescribeRun(experiment, strategy, input, runID, description="")
    cls.add_method('DescribeRun', 'void',
                   [param('std::string', 'experiment'), param('std::string', 'strategy'), param('std::string', 'input'), param('std::string', 'runID'), param('std::string', 'description', default_value='""')])
    # Const std::string accessors for the run metadata labels.
    for label_getter in ('GetDescription', 'GetExperimentLabel', 'GetInputLabel',
                         'GetRunLabel', 'GetStrategyLabel'):
        cls.add_method(label_getter, 'std::string', [], is_const=True)
    # Iterator access over the (key, value) metadata pairs.
    for it_method in ('MetadataBegin', 'MetadataEnd'):
        cls.add_method(it_method,
                       'std::_List_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > >, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >',
                       [])
    # protected virtual void DoDispose()
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3DataOutputInterface_methods(root_module, cls):
    """Register the Python bindings for ns3::DataOutputInterface (stats module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::DataOutputInterface const &', 'arg0')])
    cls.add_constructor([])
    # std::string GetFilePrefix() const
    cls.add_method('GetFilePrefix', 'std::string', [], is_const=True)
    # pure virtual void Output(DataCollector & dc)
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')],
                   is_pure_virtual=True, is_virtual=True)
    # void SetFilePrefix(std::string const prefix)
    cls.add_method('SetFilePrefix', 'void', [param('std::string const', 'prefix')])
    # protected virtual void DoDispose()
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3DoubleValue_methods(root_module, cls):
    """Register the Python bindings for ns3::DoubleValue (core module)."""
    # Default constructor, copy constructor, and value constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    cls.add_constructor([param('double const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(value, checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # double Get() const
    cls.add_method('Get', 'double', [], is_const=True)
    # virtual std::string SerializeToString(checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(double const & value)
    cls.add_method('Set', 'void', [param('double const &', 'value')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register the Python bindings for ns3::EmptyAttributeValue (core module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # The AttributeValue overrides are private in EmptyAttributeValue.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    """Register the Python bindings for ns3::EnumChecker (core module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    cls.add_constructor([])
    # Add / AddDefault share the same (int v, std::string name) signature.
    for adder in ('Add', 'AddDefault'):
        cls.add_method(adder, 'void', [param('int', 'v'), param('std::string', 'name')])
    # virtual bool Check(AttributeValue const & value) const
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    # virtual bool Copy(src, dst) const
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
                   is_const=True, is_virtual=True)
    # virtual Ptr<AttributeValue> Create() const
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # Type-information queries from the AttributeChecker interface.
    for query, rtype in (('GetUnderlyingTypeInformation', 'std::string'),
                         ('GetValueTypeName', 'std::string'),
                         ('HasUnderlyingTypeInformation', 'bool')):
        cls.add_method(query, rtype, [], is_const=True, is_virtual=True)
    return
def register_Ns3EnumValue_methods(root_module, cls):
    """Register the Python bindings for ns3::EnumValue (core module)."""
    # Copy constructor, default constructor, and value constructor.
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('int', 'v')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(value, checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # int Get() const
    cls.add_method('Get', 'int', [], is_const=True)
    # virtual std::string SerializeToString(checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(int v)
    cls.add_method('Set', 'void', [param('int', 'v')])
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register the Python bindings for ns3::EventImpl (core module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # void Cancel(), void Invoke()
    for action in ('Cancel', 'Invoke'):
        cls.add_method(action, 'void', [])
    # bool IsCancelled()
    cls.add_method('IsCancelled', 'bool', [])
    # protected pure virtual void Notify()
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3FileAggregator_methods(root_module, cls):
    """Register the Python bindings for ns3::FileAggregator (stats module)."""
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # FileAggregator(outputFileName, fileType = SPACE_SEPARATED)
    cls.add_constructor([param('std::string const &', 'outputFileName'), param('ns3::FileAggregator::FileType', 'fileType', default_value='::ns3::FileAggregator::SPACE_SEPARATED')])
    # void SetFileType(FileType fileType)
    cls.add_method('SetFileType', 'void', [param('ns3::FileAggregator::FileType', 'fileType')])
    # void SetHeading(std::string const & heading)
    cls.add_method('SetHeading', 'void', [param('std::string const &', 'heading')])
    # SetNdFormat(format) for N = 1..10 -- all share one signature.
    for dims in range(1, 11):
        cls.add_method('Set%ddFormat' % dims, 'void',
                       [param('std::string const &', 'format')])
    # WriteNd(context, v1, ..., vN) trace sinks for N = 1..10.
    for dims in range(1, 11):
        values = [param('double', 'v%d' % i) for i in range(1, dims + 1)]
        cls.add_method('Write%dd' % dims, 'void',
                       [param('std::string', 'context')] + values)
    return
def register_Ns3GnuplotAggregator_methods(root_module, cls):
    """Register the Python bindings for ns3::GnuplotAggregator (stats module)."""
    # Copy constructor and output-file-name constructor.
    cls.add_constructor([param('ns3::GnuplotAggregator const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension')])
    # void Add2dDataset(dataset, title)
    cls.add_method('Add2dDataset', 'void',
                   [param('std::string const &', 'dataset'), param('std::string const &', 'title')])
    # void AppendExtra(extra)
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Static class-wide defaults applied to newly added 2-D datasets.
    cls.add_method('Set2dDatasetDefaultErrorBars', 'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')],
                   is_static=True)
    cls.add_method('Set2dDatasetDefaultExtra', 'void',
                   [param('std::string const &', 'extra')],
                   is_static=True)
    cls.add_method('Set2dDatasetDefaultStyle', 'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')],
                   is_static=True)
    # Per-dataset configuration setters.
    cls.add_method('Set2dDatasetErrorBars', 'void',
                   [param('std::string const &', 'dataset'), param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    cls.add_method('Set2dDatasetExtra', 'void',
                   [param('std::string const &', 'dataset'), param('std::string const &', 'extra')])
    cls.add_method('Set2dDatasetStyle', 'void',
                   [param('std::string const &', 'dataset'), param('ns3::Gnuplot2dDataset::Style', 'style')])
    # Plot-wide configuration setters.
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetKeyLocation', 'void', [param('ns3::GnuplotAggregator::KeyLocation', 'keyLocation')])
    cls.add_method('SetLegend', 'void',
                   [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    # Trace sinks feeding (x, y) points, optionally with error deltas.
    cls.add_method('Write2d', 'void',
                   [param('std::string', 'context'), param('double', 'x'), param('double', 'y')])
    cls.add_method('Write2dDatasetEmptyLine', 'void', [param('std::string const &', 'dataset')])
    cls.add_method('Write2dWithXErrorDelta', 'void',
                   [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    cls.add_method('Write2dWithXYErrorDelta', 'void',
                   [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
    cls.add_method('Write2dWithYErrorDelta', 'void',
                   [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    return
def register_Ns3IntegerValue_methods(root_module, cls):
    """Register the Python bindings for ns3::IntegerValue (core module)."""
    # Default constructor, copy constructor, and value constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    cls.add_constructor([param('int64_t const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(value, checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # int64_t Get() const
    cls.add_method('Get', 'int64_t', [], is_const=True)
    # virtual std::string SerializeToString(checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(int64_t const & value)
    cls.add_method('Set', 'void', [param('int64_t const &', 'value')])
    return
def register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, cls):
    """Register the Python bindings for ns3::MinMaxAvgTotalCalculator<double> (stats module)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::MinMaxAvgTotalCalculator< double > const &', 'arg0')])
    cls.add_constructor([])
    # virtual void Output(DataOutputCallback & callback) const
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')],
                   is_const=True, is_virtual=True)
    # void Reset()
    cls.add_method('Reset', 'void', [])
    # void Update(double const i)
    cls.add_method('Update', 'void', [param('double const', 'i')])
    # Statistic accessors -- all const virtual; getCount returns long int,
    # the remaining accessors return double.
    for getter, rtype in (('getCount', 'long int'),
                          ('getMax', 'double'),
                          ('getMean', 'double'),
                          ('getMin', 'double'),
                          ('getSqrSum', 'double'),
                          ('getStddev', 'double'),
                          ('getSum', 'double'),
                          ('getVariance', 'double')):
        cls.add_method(getter, rtype, [], is_const=True, is_virtual=True)
    # protected virtual void DoDispose()
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register the Python bindings for ns3::ObjectFactoryChecker (core module)."""
    # Default constructor followed by the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register the Python bindings for ns3::ObjectFactoryValue (core module)."""
    # Default constructor, copy constructor, and value constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # virtual Ptr<AttributeValue> Copy() const
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # virtual bool DeserializeFromString(value, checker)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::ObjectFactory Get() const
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    # virtual std::string SerializeToString(checker) const
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::ObjectFactory const & value)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OmnetDataOutput_methods(root_module, cls):
    """Register bindings for ns3::OmnetDataOutput (omnet-data-output.h, module 'stats')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::OmnetDataOutput const &', 'arg0')])
    cls.add_constructor([])
    # Virtual Output() entry point and the protected DoDispose() override.
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
def register_Ns3Probe_methods(root_module, cls):
    """Register bindings for the abstract base class ns3::Probe (probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::Probe const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual connection hooks implemented by the concrete probes.
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsEnabled', 'bool', [], is_const=True, is_virtual=True)
def register_Ns3SqliteDataOutput_methods(root_module, cls):
    """Register bindings for ns3::SqliteDataOutput (sqlite-data-output.h, module 'stats')."""
    cls.add_constructor([param('ns3::SqliteDataOutput const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
def register_Ns3TimeMinMaxAvgTotalCalculator_methods(root_module, cls):
    """Register bindings for ns3::TimeMinMaxAvgTotalCalculator (time-data-calculators.h, module 'stats')."""
    cls.add_constructor([param('ns3::TimeMinMaxAvgTotalCalculator const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')], is_const=True, is_virtual=True)
    # Note: Update takes the Time by const value, not by reference.
    cls.add_method('Update', 'void', [param('ns3::Time const', 'i')])
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
def register_Ns3TimeSeriesAdaptor_methods(root_module, cls):
    """Register bindings for ns3::TimeSeriesAdaptor (time-series-adaptor.h, module 'stats')."""
    cls.add_constructor([param('ns3::TimeSeriesAdaptor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Trace sinks: each takes (oldData, newData) of the corresponding C++ type.
    cls.add_method('TraceSinkBoolean', 'void', [param('bool', 'oldData'), param('bool', 'newData')])
    cls.add_method('TraceSinkDouble', 'void', [param('double', 'oldData'), param('double', 'newData')])
    cls.add_method('TraceSinkUinteger16', 'void', [param('uint16_t', 'oldData'), param('uint16_t', 'newData')])
    cls.add_method('TraceSinkUinteger32', 'void', [param('uint32_t', 'oldData'), param('uint32_t', 'newData')])
    cls.add_method('TraceSinkUinteger8', 'void', [param('uint8_t', 'oldData'), param('uint8_t', 'newData')])
def register_Ns3TimeValue_methods(root_module, cls):
    """Register bindings for ns3::TimeValue (nstime.h, module 'core')."""
    # Constructors: default, copy, and conversion from an ns3::Time.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register bindings for ns3::TypeIdChecker (type-id.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register bindings for ns3::TypeIdValue (type-id.h, module 'core')."""
    # Constructors: default, copy, and conversion from an ns3::TypeId.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
def register_Ns3Uinteger16Probe_methods(root_module, cls):
    """Register bindings for ns3::Uinteger16Probe (uinteger-16-probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::Uinteger16Probe const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'uint16_t', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('uint16_t', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('uint16_t', 'value')], is_static=True)
def register_Ns3Uinteger32Probe_methods(root_module, cls):
    """Register bindings for ns3::Uinteger32Probe (uinteger-32-probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::Uinteger32Probe const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('uint32_t', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('uint32_t', 'value')], is_static=True)
def register_Ns3Uinteger8Probe_methods(root_module, cls):
    """Register bindings for ns3::Uinteger8Probe (uinteger-8-probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::Uinteger8Probe const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'uint8_t', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('uint8_t', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('uint8_t', 'value')], is_static=True)
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register bindings for ns3::UintegerValue (uinteger.h, module 'core')."""
    # Constructors: default, copy, and conversion from a uint64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    cls.add_constructor([param('uint64_t const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'uint64_t', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('uint64_t const &', 'value')])
def register_Ns3BooleanProbe_methods(root_module, cls):
    """Register bindings for ns3::BooleanProbe (boolean-probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::BooleanProbe const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'bool', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('bool', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('bool', 'value')], is_static=True)
def register_Ns3DoubleProbe_methods(root_module, cls):
    """Register bindings for ns3::DoubleProbe (double-probe.h, module 'stats')."""
    cls.add_constructor([param('ns3::DoubleProbe const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'double', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('double', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('double', 'value')], is_static=True)
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register bindings for the abstract base ns3::Hash::Implementation (hash-function.h, module 'core')."""
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    cls.add_constructor([])
    # GetHash32 and clear are pure virtual; GetHash64 has a default implementation.
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True)
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register bindings for ns3::Hash::Function::Fnv1a (hash-fnv.h, module 'core')."""
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register bindings for ns3::Hash::Function::Hash32 (hash-function.h, module 'core')."""
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    # Constructed from a free-function pointer of type Hash32Function_ptr.
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register bindings for ns3::Hash::Function::Hash64 (hash-function.h, module 'core')."""
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    # Constructed from a free-function pointer of type Hash64Function_ptr.
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register bindings for ns3::Hash::Function::Murmur3 (hash-murmur3.h, module 'core')."""
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
def register_functions(root_module):
    """Register the module's free functions, then recurse into the nested ns3 submodules."""
    # get-wildcard-matches.h (module 'stats'): GetWildcardMatches free function.
    root_module.add_function(
        'GetWildcardMatches', 'std::string',
        [param('std::string const &', 'configPath'),
         param('std::string const &', 'matchedPath'),
         param('std::string const &', 'wildcardSeparator', default_value='" "')])
    # data-calculator.h (module 'stats'): isNaN free function.
    root_module.add_function('isNaN', 'bool', [param('double', 'x')])
    # Submodule registration, in the same order as the generated original.
    for sub_name, registrar in (('FatalImpl', register_functions_ns3_FatalImpl),
                                ('Hash', register_functions_ns3_Hash),
                                ('internal', register_functions_ns3_internal)):
        registrar(root_module.get_submodule(sub_name), root_module)
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register in the ns3::FatalImpl namespace."""
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the ns3::Hash::Function submodule; no free functions at this level."""
    function_submodule = module.get_submodule('Function')
    register_functions_ns3_Hash_Function(function_submodule, root_module)
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register in the ns3::Hash::Function namespace."""
def register_functions_ns3_internal(module, root_module):
    """No free functions to register in the ns3::internal namespace."""
def main():
    """Build the binding module description and emit the generated code to stdout."""
    sink = FileCodeSink(sys.stdout)
    root = module_init()
    # Registration must happen in this order: types, then methods, then functions.
    register_types(root)
    register_methods(root)
    register_functions(root)
    root.generate(sink)
# Script entry point: generate the bindings when run directly.
if __name__ == '__main__':
    main()
# Copyright (c) 2008 Princeton University
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from BaseGarnetNetwork import BaseGarnetNetwork
from BasicRouter import BasicRouter
from ClockedObject import ClockedObject
class GarnetRouter(BasicRouter):
    # SimObject parameter declarations for the flexible-pipeline Garnet router.
    type = 'GarnetRouter'
    cxx_class = 'Router'
    cxx_header = "mem/ruby/network/garnet/flexible-pipeline/Router.hh"
    # Both values are resolved from the enclosing network via the Parent proxy.
    vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
                             "virtual channels per virtual network")
    virt_nets = Param.Int(Parent.number_of_virtual_networks,
                          "number of virtual networks")
class GarnetNetworkInterface(ClockedObject):
    # SimObject parameter declarations for the flexible-pipeline Garnet NIC.
    type = 'GarnetNetworkInterface'
    cxx_class = 'NetworkInterface'
    cxx_header = "mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.hh"
    # Unique identifier; must be assigned by the network during construction.
    id = Param.UInt32("ID in relation to other network interfaces")
    # Both values are resolved from the enclosing network via the Parent proxy.
    vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
                                "virtual channels per virtual network")
    virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
                             "number of virtual networks")
class GarnetNetwork(BaseGarnetNetwork):
    # SimObject parameter declarations for the flexible-pipeline Garnet network.
    type = 'GarnetNetwork'
    cxx_header = "mem/ruby/network/garnet/flexible-pipeline/GarnetNetwork.hh"
    # Fix: dropped the stray C-style trailing semicolons from the two
    # declarations below; they are no-ops in Python and flagged by linters.
    buffer_size = Param.UInt32(0,
        "default buffer size; 0 indicates infinite buffering")
    number_of_pipe_stages = Param.UInt32(4, "router pipeline stages")
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
import math
from mantid.kernel import *
from mantid.api import *
class EnggFitPeaks(PythonAlgorithm):
    # Unit name the input workspace's X dimension must have (checked in PyExec).
    EXPECTED_DIM_TYPE = 'Time-of-flight'
    # Peak-shape function name used for the single-peak fits.
    PEAK_TYPE = 'BackToBackExponential'
    # Max limit on the estimated error of a center for it to be accepted as a good fit
    # (in percentage of the center value)
    CENTER_ERROR_LIMIT = 10
def category(self):
    """Return the Mantid categories this algorithm is listed under."""
    categories = "Diffraction\\Engineering;Diffraction\\Fitting"
    return categories
def name(self):
    """Return the name under which the algorithm is registered with Mantid."""
    algorithm_name = "EnggFitPeaks"
    return algorithm_name
def summary(self):
    """Return the one-line description shown in the Mantid documentation."""
    text = ("The algorithm fits an expected diffraction pattern to a spectrum from a workspace "
            "by fitting one peak at a time (single peak fits).")
    return text
def PyInit(self):
    """Declare the algorithm's input/output properties (Mantid framework hook)."""
    # Mandatory input spectrum; X units are validated against EXPECTED_DIM_TYPE in PyExec.
    self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "", Direction.Input),
                         "Workspace to fit peaks in. The X units must be time of flight (TOF).")
    self.declareProperty("WorkspaceIndex", 0,
                         "Index of the spectra to fit peaks in")
    # Expected peaks, either inline (defaults from CeO2) or loaded from a .csv file;
    # the file property takes precedence when both are given.
    self.declareProperty(FloatArrayProperty("ExpectedPeaks", (self._get_default_peaks())),
                         "A list of dSpacing values to be translated into TOF to find expected peaks.")
    self.declareProperty(FileProperty(name="ExpectedPeaksFromFile",defaultValue="",
                                      action=FileAction.OptionalLoad,extensions = [".csv"]),
                         "Load from file a list of dSpacing values to be translated into TOF to "
                         "find expected peaks. This takes precedence over 'ExpectedPeaks' if both "
                         "options are given.")
    peaks_grp = 'Peaks to fit'
    self.setPropertyGroup('ExpectedPeaks', peaks_grp)
    self.setPropertyGroup('ExpectedPeaksFromFile', peaks_grp)
    # Optional named output table; only created when a name is supplied.
    self.declareProperty('OutFittedPeaksTable', '', direction=Direction.Input,
                         doc = 'Name for a table workspace with the parameters of the peaks found and '
                         'fitted. If not given, the table workspace is not created.')
    # Mandatory output: table of fitted peak parameters.
    self.declareProperty(ITableWorkspaceProperty("FittedPeaks", "", Direction.Output),
                         doc = "Information on fitted peaks. The table contains, for every peak fitted "
                         "the expected peak value (in d-spacing), and the parameters fitted. The expected "
                         "values are given in the column labelled 'dSpacing'. When fitting "
                         "back-to-back exponential functions, the 'X0' column has the fitted peak center.")
def PyExec(self):
    """Run the algorithm: locate expected peaks in TOF and fit them one at a time."""
    import EnggUtils
    # Get peaks in dSpacing from file(takes precedence) or from the inline property.
    expected_peaks_dsp = EnggUtils.read_in_expected_peaks(self.getPropertyValue("ExpectedPeaksFromFile"),
                                                          self.getProperty('ExpectedPeaks').value)
    if len(expected_peaks_dsp) < 1:
        raise ValueError("Cannot run this algorithm without any input expected peaks")
    # Get expected peaks in TOF for the detector; reject workspaces whose X unit is not TOF.
    in_wks = self.getProperty("InputWorkspace").value
    dimType = in_wks.getXDimension().name
    if self.EXPECTED_DIM_TYPE != dimType:
        raise ValueError("This algorithm expects a workspace with %s X dimension, but "
                         "the X dimension of the input workspace is: '%s'" % (self.EXPECTED_DIM_TYPE, dimType))
    wks_index = self.getProperty("WorkspaceIndex").value
    # FindPeaks will return a list of peaks sorted by the centre found. Sort the peaks as well,
    # so we can match them with fitted centres later.
    expected_peaks_ToF = sorted(self._expected_peaks_in_ToF(expected_peaks_dsp, in_wks, wks_index))
    found_peaks = self._peaks_from_find_peaks(in_wks, expected_peaks_ToF, wks_index)
    # Warn (but continue) when FindPeaks located fewer peaks than expected.
    if found_peaks.rowCount() < len(expected_peaks_ToF):
        txt = "Peaks effectively found: " + str(found_peaks)[1:-1]
        self.log().warning("Some peaks from the list of expected peaks were not found by the algorithm "
                           "FindPeaks which this algorithm uses to check that the data has the the "
                           "expected peaks. " + txt)
    peaks_table_name = self.getPropertyValue("OutFittedPeaksTable")
    fitted_peaks = self._fit_all_peaks(in_wks, wks_index,
                                       (found_peaks, expected_peaks_dsp), peaks_table_name)
    # mandatory output
    self.setProperty('FittedPeaks', fitted_peaks)
def _get_default_peaks(self):
    """Return the default list of expected peaks (CeO2 values) used by Engg algorithms."""
    import EnggUtils as engg_utils
    return engg_utils.default_ceria_expected_peaks()
def _estimate_start_end_fitting_range(self, center, width):
"""
Try to predict a fit window for the peak (using magic numbers). The heuristic
+-COEF_LEFT/RIGHT sometimes produces ranges that are too narrow and contain too few
samples (one or a handful) for the fitting to run correctly. A minimum is enforced.
@Returns :: a tuple with the range (start and end values) for fitting a peak.
"""
# Magic numbers, approx. represanting the shape/proportions of a B2BExponential peak
COEF_LEFT = 2
COEF_RIGHT = 3
# Current approach: don't force a minimum width. If the width initial guess is too
# narrow we might miss some peaks.
# To prevent that, the minimum could be set to for example the arbitrary '175' which
# seemed to have good effects overall, but that can lead to fitting the wrong
# (neighbor) peaks.
MIN_RANGE_WIDTH = 1
startx = center - (width * COEF_LEFT)
endx = center + (width * COEF_RIGHT)
x_diff = endx-startx
if x_diff < MIN_RANGE_WIDTH:
inc = (min_width-x_diff)/5
endx = endx + 3*inc
startx = startx - 2*inc
return (startx, endx)
def _fit_all_peaks(self, in_wks, wks_index, peaks, peaks_table_name):
    """
    This method is the core of EnggFitPeaks. It tries to fit as many peaks as there are in the list of
    expected peaks passed to the algorithm. This is a single peak fitting, in the sense that peaks
    are fitted separately, one at a time.

    The parameters from the (Gaussian) peaks fitted by FindPeaks elsewhere (before calling this method)
    are used as initial guesses.

    @param in_wks :: input workspace with spectra for fitting
    @param wks_index :: workspace index of the spectrum where the given peaks should be fitted
    @param peaks :: tuple made of two lists: found_peaks (peaks found by FindPeaks or similar
                    algorithm), and expected_peaks_dsp (expected peaks given as input to this
                    algorithm, in dSpacing units)
    @param peaks_table_name :: name of an (output) table with peaks parameters. If empty, the table is anonymous
    @returns a table with parameters for every fitted peak.
    @raises RuntimeError if not a single peak could be fitted acceptably
    """
    found_peaks = peaks[0]
    fitted_peaks = self._create_fitted_peaks_table(peaks_table_name)
    prog = Progress(self, start=0, end=1, nreports=found_peaks.rowCount())
    for i in range(found_peaks.rowCount()):
        prog.report('Fitting peak number ' + str(i + 1))
        row = found_peaks.row(i)
        # Peak parameters estimated by FindPeaks
        initial_params = (row['centre'], row['width'], row['height'])
        center, width, height = initial_params
        # Oh oh, this actually happens sometimes for some spectra of the system test dataset
        # and it should be clarified when the role of FindPeaks etc. is fixed (trac ticket #10907)
        # BUGFIX: the width is element 1 of the tuple; the previous code read element 2
        # (the height) and logged the three values shuffled in the message below.
        if width <= 0.:
            detailTxt = ("Cannot fit a peak with these initial parameters from FindPeaks, center: %s "
                         ", width: %s, height: %s" % (center, width, height))
            self.log().notice('For workspace index ' + str(wks_index) + ', a peak that is in the list of '
                              'expected peaks and was found by FindPeaks has not been fitted correctly. '
                              'It will be ignored. ' + "Expected, dSpacing: {0}. Details: {1}".
                              format(peaks[1][i], detailTxt))
            continue
        try:
            param_table, chi_over_dof = self._fit_single_peak(peaks[1][i], initial_params, in_wks, wks_index)
        except RuntimeError:
            self.log().warning("Problem found when trying to fit a peak centered at {0} (dSpacing), "
                               "for which the initial guess from FindPeaks is at {1} (ToF). Single "
                               "peak fitting failed. Skipping this peak."
                               .format(peaks[1][i], center))
            continue
        fitted_params = {}
        fitted_params['dSpacing'] = peaks[1][i]
        fitted_params['Chi'] = chi_over_dof
        self._add_parameters_to_map(fitted_params, param_table)
        if self._peak_is_acceptable(fitted_params, in_wks, wks_index):
            fitted_peaks.addRow(fitted_params)
        else:
            self.log().notice("Discarding peak found with wrong center and/or excessive or suspicious "
                              "error estimate in the center estimate: {0} (ToF) ({1}, dSpacing), "
                              "with error: {2}, for dSpacing {3}".
                              format(fitted_params['X0'], peaks[1][i],
                                     fitted_params['X0_Err'], fitted_params['dSpacing']))
    # Check if we were able to really fit any peak
    if 0 == fitted_peaks.rowCount():
        # BUGFIX: found_peaks is a table workspace; use rowCount() as done
        # everywhere else instead of len(), which is not its API.
        detailTxt = ("Could find " + str(found_peaks.rowCount()) + " peaks using the algorithm FindPeaks but " +
                     "then it was not possible to fit any peak starting from these peaks found and using '" +
                     self.PEAK_TYPE + "' as peak function.")
        self.log().warning('Could not fit any peak. Please check the list of expected peaks, as it does not '
                           'seem to be appropriate for the workspace given. More details: ' +
                           detailTxt)
        raise RuntimeError('Could not fit any peak. Failed to fit peaks with peak type ' +
                           self.PEAK_TYPE + ' even though FindPeaks found ' + str(found_peaks.rowCount()) +
                           ' peaks in principle. See the logs for further details.')
    self.log().information("Fitted {0} peaks in total.".format(fitted_peaks.rowCount()))
    return fitted_peaks
def _fit_single_peak(self, expected_center, initial_params, wks, wks_index):
    """
    Fit one peak of type self.PEAK_TYPE (plus a linear background), seeded from
    an initial guess of Gaussian-style parameters (center, width, height).

    @param expected_center :: expected peak position (used for log messages)
    @param initial_params :: tuple with initial guess of the peak center, width and height
    @param wks :: workspace with data (spectra) to fit
    @param wks_index :: index of the spectrum to fit
    @return parameters table from Fit, and the goodness of fit estimation (Chi^2/DoF)
    """
    peak_center, peak_width, peak_height = initial_params
    # Translate the Gaussian-style estimates into the peak function parameters:
    # sigma from the FWHM-like width, and integrated intensity from the height.
    gauss_sigma = peak_width / (2 * math.sqrt(2 * math.log(2)))
    approx_intensity = peak_height * gauss_sigma * math.sqrt(2 * math.pi)
    peak = FunctionFactory.createFunction(self.PEAK_TYPE)
    for par_name, par_value in (('X0', peak_center),
                                ('S', gauss_sigma),
                                ('I', approx_intensity)):
        peak.setParameter(par_name, par_value)
    # Fit over a window predicted from the initial center/width estimates
    (startx, endx) = self._estimate_start_end_fitting_range(peak_center, peak_width)
    fit_function = 'name=LinearBackground;{0}'.format(peak)
    fit_alg = self.createChildAlgorithm('Fit')
    fit_alg.setProperty('Function', fit_function)
    fit_alg.setProperty('InputWorkspace', wks)
    fit_alg.setProperty('WorkspaceIndex', wks_index)
    fit_alg.setProperty('CreateOutput', True)
    fit_alg.setProperty('StartX', startx)
    fit_alg.setProperty('EndX', endx)
    self.log().debug("Fitting for peak expectd in (d-spacing): {0}, Fitting peak function: "
                     "{1}, with startx: {2}, endx: {3}".
                     format(expected_center, fit_function, startx, endx))
    fit_alg.execute()
    return (fit_alg.getProperty('OutputParameters').value,
            fit_alg.getProperty('OutputChi2overDoF').value)
def _peaks_from_find_peaks(self, in_wks, expected_peaks_ToF, wks_index):
"""
Use the algorithm FindPeaks to check that the expected peaks are there.
@param in_wks data workspace
@param expected_peaks_ToF vector/list of expected peak values
@param wks_index workspace index
@return list of peaks found by FindPeaks. If there are no issues, the length
of this list should be the same as the number of expected peaks received.
"""
# Find approximate peak positions, asumming Gaussian shapes
find_peaks_alg = self.createChildAlgorithm('FindPeaks')
find_peaks_alg.setProperty('InputWorkspace', in_wks)
find_peaks_alg.setProperty('PeakPositions', expected_peaks_ToF)
find_peaks_alg.setProperty('PeakFunction', 'Gaussian')
find_peaks_alg.setProperty('WorkspaceIndex', wks_index)
find_peaks_alg.execute()
found_peaks = find_peaks_alg.getProperty('PeaksList').value
return found_peaks
def _expected_peaks_in_ToF(self, expected_peaks, in_wks, wks_index):
    """
    Converts expected peak dSpacing values to TOF values for the
    detector. Implemented by using the Mantid algorithm ConvertUnits. A
    simple user script to do what this function does would be
    as follows:

    import mantid.simpleapi as sapi
    yVals = [1] * (len(expected_peaks) - 1)
    ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
                                   ParentWorkspace=in_wks)
    target_units = 'TOF'
    wsTo = sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
    peaks_ToF = wsTo.dataX(0)
    values = [peaks_ToF[i] for i in range(0,len(peaks_ToF))]

    @param expected_peaks :: vector of expected peaks, in dSpacing units
    @param in_wks :: input workspace with the relevant instrument/geometry
    @param wks_index workspace index

    Returns:
        a vector of ToF values converted from the input (dSpacing) vector.
    """
    # This and the next exception, below, still need revisiting:
    # https://github.com/mantidproject/mantid/issues/12930
    run = in_wks.getRun()
    if 1 == in_wks.getNumberHistograms() and run.hasProperty('difc'):
        difc = run.getLogData('difc').value
        #if run.hasProperty('difa'):
        #    _difa = run.getLogData('difa').value
        #else:
        #    _difa = 0
        if run.hasProperty('tzero'):
            tzero = run.getLogData('tzero').value
        else:
            tzero = 0
        # If the log difc is present, then use these GSAS calibration parameters from the logs
        # (linear conversion ToF = difc * d + tzero; note 'difa' is currently not applied)
        return [(epd * difc + tzero) for epd in expected_peaks]
    # When receiving a (for example) focussed workspace we still do not know how
    # to properly deal with it. CreateWorkspace won't copy the instrument sample
    # and source even if given the option ParentWorkspace. Resort to old style
    # hard-coded calculation.
    # The present behavior of 'ConvertUnits' is to show an information log:
    # "Unable to calculate sample-detector distance for 1 spectra. Masking spectrum"
    # and silently produce a wrong output workspace. That might need revisiting.
    if 1 == in_wks.getNumberHistograms():
        return self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
    # Create workspace just to convert dSpacing -> ToF, yVals are irrelevant
    # this used to be calculated with:
    # lambda d: 252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * d
    # which is approximately what ConvertUnits will do
    # remember the -1, we must produce a histogram data workspace, which is what
    # for example EnggCalibrate expects
    yVals = [1] * (len(expected_peaks) - 1)
    # Do like: ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
    #                                         ParentWorkspace=self.getProperty("InputWorkspace").value)
    create_alg = self.createChildAlgorithm("CreateWorkspace")
    create_alg.setProperty("UnitX", 'dSpacing')
    create_alg.setProperty("DataX", expected_peaks)
    create_alg.setProperty("DataY", yVals)
    create_alg.setProperty("ParentWorkspace", in_wks)
    create_alg.execute()
    ws_from = create_alg.getProperty("OutputWorkspace").value
    # finally convert units, like: sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
    conv_alg = self.createChildAlgorithm("ConvertUnits")
    conv_alg.setProperty("InputWorkspace", ws_from)
    target_units = 'TOF'
    conv_alg.setProperty("Target", target_units)
    # note: this implicitly uses default property "EMode" value 'Elastic'
    goodExec = conv_alg.execute()
    if not goodExec:
        raise RuntimeError("Conversion of units went wrong. Failed to run ConvertUnits for {0} "
                           "peaks. Details: {1}".format(len(expected_peaks), expected_peaks))
    wsTo = conv_alg.getProperty('OutputWorkspace').value
    peaks_ToF = wsTo.readX(0)
    if len(peaks_ToF) != len(expected_peaks):
        raise RuntimeError("Conversion of units went wrong. Converted {0} peaks from the "
                           "original list of {1} peaks. The instrument definition might be "
                           "incomplete for the original workspace / file.".
                           format(len(peaks_ToF), len(expected_peaks)))
    tof_values = [peaks_ToF[i] for i in range(0,len(peaks_ToF))]
    # catch potential failures because of geometry issues, etc.
    # (an output identical to the input means ConvertUnits effectively did nothing)
    if tof_values == expected_peaks:
        vals = self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
        return vals
    return tof_values
def _do_approx_hard_coded_convert_units_to_ToF(self, dsp_values, ws, wks_index):
"""
Converts from dSpacing to Time-of-flight, for one spectrum/detector. This method
is here for exceptional cases that presently need clarification / further work,
here and elsewhere in Mantid, and should ideally be removed in favor of the more
general method that uses the algorithm ConvertUnits.
@param dsp_values to convert from dSpacing
@param ws workspace with the appropriate instrument / geometry definition
@param wks_index index of the spectrum
Returns:
input values converted from dSpacing to ToF
"""
det = ws.getDetector(wks_index)
# Current detector parameters
detL2 = det.getDistance(ws.getInstrument().getSample())
detTwoTheta = ws.detectorTwoTheta(det)
# hard coded equation to convert dSpacing -> TOF for the single detector
# Values (in principle, expected peak positions) in TOF for the detector
tof_values = [252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * ep for ep in dsp_values]
return tof_values
def _create_fitted_peaks_table(self, tbl_name):
"""
Creates a table where to put peak fitting results to
@param tbl_name :: name of the table workspace (can be empty)
"""
table = None
if not tbl_name:
alg = self.createChildAlgorithm('CreateEmptyTableWorkspace')
alg.execute()
table = alg.getProperty('OutputWorkspace').value
else:
import mantid.simpleapi as sapi
table = sapi.CreateEmptyTableWorkspace(OutputWorkspace=tbl_name)
table.addColumn('double', 'dSpacing')
for param in ['A0', 'A1', 'X0', 'A', 'B', 'S', 'I']:
table.addColumn('double', param)
table.addColumn('double', param + '_Err')
table.addColumn('double', 'Chi')
return table
def _peak_is_acceptable(self, fitted_params, wks, wks_index):
"""
Decide whether a peak fitted looks acceptable, based on the values fitted for the
parameters of the peak and other metrics from Fit (Chi^2).
It applies for example a simple rule: if the peak center is
negative, it is obviously a fit failure. This is sometimes not so straightforward
from the error estimates and Chi^2 values returned from Fit, as there seem to be
a small percentage of cases with numberical issues (nan, zeros, etc.).
@param fitted_params :: parameters fitted from Fit algorithm
@param in_wks :: input workspace where a spectrum was fitted
@param wks_index :: workspace index of the spectrum that was fitted
Returns:
True if the peak function parameters and error estimates look acceptable
so the peak should be used.
"""
spec_x_axis = wks.readX(wks_index)
center = self._find_peak_center_in_params(fitted_params)
intensity = self._find_peak_intensity_in_params(fitted_params)
return (center >= spec_x_axis.min() and center <= spec_x_axis.max() and
intensity > 0 and
fitted_params['Chi'] < 10 and self._b2bexp_is_acceptable(fitted_params))
def _find_peak_center_in_params(self, fitted_params):
"""
Retrieve the fitted peak center/position from the set of parameters fitted.
Returns:
The peak center from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['X0']
else:
raise ValueError('Inconsistency found. I do not know how to deal with centers of peaks '
'of types other than {0}'.format(PEAK_TYPE))
def _find_peak_intensity_in_params(self, fitted_params):
"""
Retrieve the fitted peak intensity/height/amplitude from the set of parameters fitted.
Returns:
The peak intensity from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['I']
else:
raise ValueError('Inconsistency found. I do not know how to deal with intensities of '
'peaks of types other than {0}'.format(PEAK_TYPE))
def _b2bexp_is_acceptable(self, fitted_params):
"""
Checks specific to Back2BackExponential peak functions.
@param fitted_params :: parameters fitted, where it is assumed that the
standard Back2BackExponential parameter names are used
Returns:
True if the Bk2BkExponential parameters and error estimates look acceptable
so the peak should be used.
"""
# Ban: negative centers, negative left (A) and right (B) exponential coefficient,
# and Gaussian spread (S).
# Also ban strange error estimates (nan, all zero error)
# And make sure that the error on the center (X0) is not too big in relative terms
return (fitted_params['X0'] > 0
and fitted_params['A'] > 0 and fitted_params['B'] > 0 and fitted_params['S'] > 0
and not math.isnan(fitted_params['X0_Err'])
and not math.isnan(fitted_params['A_Err'])
and not math.isnan(fitted_params['B_Err'])
and fitted_params['X0_Err'] < (fitted_params['X0'] * 100.0 / self.CENTER_ERROR_LIMIT)
and
(0 != fitted_params['X0_Err'] and 0 != fitted_params['A_Err'] and
0 != fitted_params['B_Err'] and 0 != fitted_params['S_Err'] and
0 != fitted_params['I_Err'])
)
def _add_parameters_to_map(self, param_map, param_table):
"""
Takes parameters from a table that contains output parameters from a Fit run, and adds
them as name:value and name_Err:error_value pairs to the map.
@param param_map :: map where to add the fitting parameters
@param param_table :: table with parameters from a Fit algorithm run
"""
for i in range(param_table.rowCount() - 1): # Skip the last (fit goodness) row
row = param_table.row(i)
# Get local func. param name. E.g., not f1.A0, but just A0
name = (row['Name'].rpartition('.'))[2]
param_map[name] = row['Value']
param_map[name + '_Err'] = row['Error']
AlgorithmFactory.subscribe(EnggFitPeaks) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytree nodes with extra formatting information.
This is a thin wrapper around a pytree.Leaf node.
"""
import keyword
import re
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import pytree_utils
from yapf.yapflib import style
# Allocate a fresh token id for line-continuation markers, then bump N_TOKENS
# so the new id cannot collide with any token type registered afterwards.
CONTINUATION = token.N_TOKENS
token.N_TOKENS += 1
class Subtype(object):
  """Subtype information about tokens.

  Gleaned from parsing the code. Helps determine the best formatting.
  """
  # Plain integer constants (pre-enum style); the values only need to be
  # distinct, their numeric order carries no meaning.
  NONE = 0
  UNARY_OPERATOR = 1
  BINARY_OPERATOR = 2
  SUBSCRIPT_COLON = 3
  DEFAULT_OR_NAMED_ASSIGN = 4
  VARARGS_STAR = 5
  KWARGS_STAR_STAR = 6
  ASSIGN_OPERATOR = 7
  DICTIONARY_KEY = 8
  DICTIONARY_VALUE = 9
  DICT_SET_GENERATOR = 10
  COMP_FOR = 11
  COMP_IF = 12
  IF_TEST_EXPR = 13
  DEFAULT_OR_NAMED_ASSIGN_ARG_LIST = 14
class FormatToken(object):
  """A wrapper around pytree Leaf nodes.

  This represents the token plus additional information useful for reformatting
  the code.

  Attributes:
    next_token: The token in the unwrapped line after this token or None if this
      is the last token in the unwrapped line.
    previous_token: The token in the unwrapped line before this token or None if
      this is the first token in the unwrapped line.
    matching_bracket: If a bracket token ('[', '{', or '(') the matching
      bracket.
    whitespace_prefix: The prefix for the whitespace.
    spaces_required_before: The number of spaces required before a token. This
      is a lower-bound for the formatter and not a hard requirement. For
      instance, a comment may have n required spaces before it. But the
      formatter won't place n spaces before all comments. Only those that are
      moved to the end of a line of code. The formatter may use different
      spacing when appropriate.
    can_break_before: True if we're allowed to break before this token.
    must_break_before: True if we're required to break before this token.
    total_length: The total length of the unwrapped line up to and including
      whitespace and this token. However, this doesn't include the initial
      indentation amount.
    split_penalty: The penalty for splitting the line before this token.
  """

  def __init__(self, node):
    """Constructor.

    Arguments:
      node: (pytree.Leaf) The node that's being wrapped.
    """
    assert isinstance(node, pytree.Leaf)
    self._node = node
    self.next_token = None
    self.previous_token = None
    self.matching_bracket = None
    self.whitespace_prefix = ''
    self.can_break_before = False
    self.must_break_before = False
    self.total_length = 0  # TODO(morbo): Think up a better name.
    self.split_penalty = 0
    # Comments get the style's configurable minimum spacing; all other tokens
    # start with no required spacing.
    if self.is_comment:
      self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT')
    else:
      self.spaces_required_before = 0

  def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0):
    """Register a token's whitespace prefix.

    This is the whitespace that will be output before a token's string.

    Arguments:
      newlines_before: (int) The number of newlines to place before the token.
      spaces: (int) The number of spaces to place before the token.
      indent_level: (int) The indentation level.
    """
    spaces_before = (
        ' ' * indent_level * style.Get('INDENT_WIDTH') + ' ' * spaces
    )
    if self.is_comment:
      # Re-indent every line of a multi-line comment to the new position.
      comment_lines = [s.lstrip() for s in self.value.splitlines()]
      self._node.value = ('\n' + spaces_before).join(comment_lines)
    # A newline count already annotated on the node wins over the argument.
    self.whitespace_prefix = (
        '\n' * (self.newlines or newlines_before) + spaces_before
    )

  def AdjustNewlinesBefore(self, newlines_before):
    """Change the number of newlines before this token."""
    self.whitespace_prefix = (
        '\n' * newlines_before + self.whitespace_prefix.lstrip('\n')
    )

  def RetainHorizontalSpacing(self, first_column, depth):
    """Retains a token's horizontal spacing."""
    previous = self.previous_token
    if previous is None:
      return
    cur_lineno = self.lineno
    prev_lineno = previous.lineno
    # A multiline string's reported lineno is its first line; account for the
    # embedded newlines when comparing line numbers.
    if previous.is_multiline_string:
      prev_lineno += previous.value.count('\n')
    if cur_lineno != prev_lineno:
      if not previous.is_continuation:
        self.spaces_required_before = (
            self.column - first_column + depth * style.Get('INDENT_WIDTH'))
      return
    cur_column = self.node.column
    prev_column = previous.node.column
    # NOTE(review): the magic value 2 presumably marks spacing already fixed
    # elsewhere (e.g. before comments) -- confirm against callers.
    if self.spaces_required_before != 2:
      prev_len = len(previous.value)
      if previous.is_multiline_string:
        # Only the last physical line of the string matters for spacing.
        prev_len = len(previous.value.split('\n')[-1])
      self.spaces_required_before = cur_column - (prev_column + prev_len)

  def OpensScope(self):
    """True if this token is an opening bracket ('(', '[' or '{')."""
    return self.value in pytree_utils.OPENING_BRACKETS

  def ClosesScope(self):
    """True if this token is a closing bracket (')', ']' or '}')."""
    return self.value in pytree_utils.CLOSING_BRACKETS

  def GetPytreeNode(self):
    """Return the wrapped pytree.Leaf node."""
    return self._node

  @property
  def value(self):
    # The token's source text, delegated to the wrapped leaf.
    return self._node.value

  @property
  def node(self):
    # The wrapped pytree.Leaf node itself.
    return self._node

  @property
  def node_split_penalty(self):
    """Split penalty attached to the pytree node of this token.

    Returns:
      The penalty, or None if no annotation is attached.
    """
    return pytree_utils.GetNodeAnnotation(self._node,
                                          pytree_utils.Annotation.SPLIT_PENALTY,
                                          default=0)

  @property
  def newlines(self):
    """The number of newlines needed before this token."""
    return pytree_utils.GetNodeAnnotation(self._node,
                                          pytree_utils.Annotation.NEWLINES)

  @property
  def column(self):
    """The original column number of the node in the source."""
    return self._node.column

  @property
  def lineno(self):
    """The original line number of the node in the source."""
    return self._node.lineno

  @property
  def subtypes(self):
    """Extra type information for directing formatting."""
    value = pytree_utils.GetNodeAnnotation(self._node,
                                           pytree_utils.Annotation.SUBTYPE)
    return [Subtype.NONE] if value is None else value

  @property
  def is_binary_op(self):
    """Token is a binary operator."""
    return Subtype.BINARY_OPERATOR in self.subtypes

  @property
  def name(self):
    """A string representation of the node's name."""
    return pytree_utils.NodeName(self._node)

  def __repr__(self):
    return 'FormatToken(name={0}, value={1})'.format(self.name, self.value)

  @property
  def is_comment(self):
    return self._node.type == token.COMMENT

  @property
  def is_continuation(self):
    # CONTINUATION is the custom token id allocated at module level.
    return self._node.type == CONTINUATION

  @property
  def is_keyword(self):
    return keyword.iskeyword(self.value)

  @property
  def is_name(self):
    # NAME tokens that are keywords ('if', 'def', ...) are not identifiers.
    return self._node.type == token.NAME and not self.is_keyword

  @property
  def is_number(self):
    return self._node.type == token.NUMBER

  @property
  def is_string(self):
    return self._node.type == token.STRING

  @property
  def is_multiline_string(self):
    # Triple-quoted (optionally raw/bytes/unicode-prefixed) string literal.
    return (self.is_string and
            re.match(r'^[uUbB]?[rR]?(?P<delim>"""|\'\'\').*(?P=delim)$',
                     self.value, re.DOTALL) is not None)

  @property
  def is_docstring(self):
    # A multiline string with no previous sibling is the first statement of
    # its suite, i.e. a docstring.
    return self.is_multiline_string and not self.node.prev_sibling
from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Client.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
#import xml.sax
import urllib
from types import *
import re
import base64
import socket, httplib
from httplib import HTTPConnection, HTTP
import Cookie
# SOAPpy modules
from Errors import *
from Config import Config
from Parser import parseSOAPRPC
from SOAPBuilder import buildSOAP
from Utilities import *
from Types import faultType, simplify
################################################################################
# Client
################################################################################
def SOAPUserAgent():
    """Return the User-Agent string sent with outgoing SOAP requests."""
    return "SOAPpy %s (pywebsvcs.sf.net)" % __version__
class SOAPAddress:
    """Parsed SOAP endpoint URL: protocol, optional user info, host and path.

    Accepts http, https and httpg URLs; the latter two require the matching
    client support flags in the given config.
    """

    def __init__(self, url, config = Config):
        proto, uri = urllib.splittype(url)
        # apply some defaults: no '//' means the scheme was omitted or the
        # whole string is a host[:port]; assume http
        if uri[0:2] != '//':
            if proto != None:
                uri = proto + ':' + uri
            uri = '//' + uri
            proto = 'http'
        host, path = urllib.splithost(uri)
        # A bare numeric host is treated as a port on localhost
        try:
            int(host)
            host = 'localhost:' + host
        except:
            pass
        if not path:
            path = '/'
        if proto not in ('http', 'https', 'httpg'):
            raise IOError, "unsupported SOAP protocol"
        if proto == 'httpg' and not config.GSIclient:
            raise AttributeError, \
                  "GSI client not supported by this Python installation"
        if proto == 'https' and not config.SSLclient:
            raise AttributeError, \
                  "SSL client not supported by this Python installation"
        # Split off any user[:password]@ prefix from the host part
        self.user,host = urllib.splituser(host)
        self.proto = proto
        self.host = host
        self.path = path

    def __str__(self):
        return "%(proto)s://%(host)s%(path)s" % self.__dict__

    __repr__ = __str__
class SOAPTimeoutError(socket.timeout):
    '''This exception is raised when a timeout occurs in SOAP operations'''
    # Subclasses socket.timeout so existing socket-level handlers still catch it.
    pass
class HTTPConnectionWithTimeout(HTTPConnection):
    '''Extend HTTPConnection for timeout support'''

    def __init__(self, host, port=None, strict=None, timeout=None):
        # The timeout is stored and applied to the socket after connect(),
        # since this httplib HTTPConnection takes no timeout parameter.
        HTTPConnection.__init__(self, host, port, strict)
        self._timeout = timeout

    def connect(self):
        HTTPConnection.connect(self)
        # Apply the timeout once the socket exists; a falsy timeout (None/0)
        # leaves the socket in its default blocking mode.
        if self.sock and self._timeout:
            self.sock.settimeout(self._timeout)
class HTTPWithTimeout(HTTP):
    """Old-style httplib.HTTP compatibility class whose underlying
    connections honor a per-connection timeout."""

    _connection_class = HTTPConnectionWithTimeout

    ## this __init__ copied from httplib.HTML class
    def __init__(self, host='', port=None, strict=None, timeout=None):
        "Provide a default host, since the superclass requires one."
        # some joker passed 0 explicitly, meaning default port
        if port == 0:
            port = None
        # Note that we may pass an empty string as the host; this will throw
        # an error when we attempt to connect. Presumably, the client code
        # will call connect before then, with a proper host.
        self._setup(self._connection_class(host, port, strict, timeout))
class HTTPTransport:
    """Default SOAP transport: POSTs a SOAP payload over HTTP/HTTPS/HTTPG and
    reads the response, keeping cookies across calls made on one instance."""

    def __init__(self):
        # Cookie jar shared across calls made through this transport instance
        self.cookies = Cookie.SimpleCookie();

    def getNS(self, original_namespace, data):
        """Extract the (possibly extended) namespace from the returned
        SOAP message."""
        if type(original_namespace) == StringType:
            # Look for an xmlns declaration whose value starts with the
            # original namespace; return the full declared value if found.
            pattern="xmlns:\w+=['\"](" + original_namespace + "[^'\"]*)['\"]"
            match = re.search(pattern, data)
            if match:
                return match.group(1)
            else:
                return original_namespace
        else:
            return original_namespace

    def __addcookies(self, r):
        '''Add cookies from self.cookies to request r
        '''
        for cname, morsel in self.cookies.items():
            attrs = []
            value = morsel.get('version', '')
            if value != '' and value != '0':
                attrs.append('$Version=%s' % value)
            attrs.append('%s=%s' % (cname, morsel.coded_value))
            value = morsel.get('path')
            if value:
                attrs.append('$Path=%s' % value)
            value = morsel.get('domain')
            if value:
                attrs.append('$Domain=%s' % value)
            r.putheader('Cookie', "; ".join(attrs))

    def call(self, addr, data, namespace, soapaction = None, encoding = None,
             http_proxy = None, config = Config, timeout=None):
        """POST the SOAP payload `data` to `addr` and read back the reply.

        Returns a (response_payload, new_namespace) tuple; raises HTTPError
        for non-200/500 responses (and for 500s without an XML body).
        """
        if not isinstance(addr, SOAPAddress):
            addr = SOAPAddress(addr, config)
        # Build a request; when going through a proxy the full URL is the path
        if http_proxy:
            real_addr = http_proxy
            real_path = addr.proto + "://" + addr.host + addr.path
        else:
            real_addr = addr.host
            real_path = addr.path
        if addr.proto == 'httpg':
            from pyGlobus.io import GSIHTTP
            r = GSIHTTP(real_addr, tcpAttr = config.tcpAttr)
        elif addr.proto == 'https':
            r = httplib.HTTPS(real_addr, key_file=config.SSL.key_file, cert_file=config.SSL.cert_file)
        else:
            r = HTTPWithTimeout(real_addr, timeout=timeout)
        r.putrequest("POST", real_path)
        r.putheader("Host", addr.host)
        r.putheader("User-agent", SOAPUserAgent())
        t = 'text/xml';
        if encoding != None:
            t += '; charset=%s' % encoding
        r.putheader("Content-type", t)
        r.putheader("Content-length", str(len(data)))
        self.__addcookies(r);
        # if user is not a user:passwd format
        # we'll receive a failure from the server. . .I guess (??)
        if addr.user != None:
            val = base64.encodestring(addr.user)
            # strip the trailing newline that encodestring appends
            r.putheader('Authorization','Basic ' + val.replace('\012',''))
        # This fixes sending either "" or "None"
        if soapaction == None or len(soapaction) == 0:
            r.putheader("SOAPAction", "")
        else:
            r.putheader("SOAPAction", '"%s"' % soapaction)
        if config.dumpHeadersOut:
            s = 'Outgoing HTTP headers'
            debugHeader(s)
            print "POST %s %s" % (real_path, r._http_vsn_str)
            print "Host:", addr.host
            print "User-agent: SOAPpy " + __version__ + " (http://pywebsvcs.sf.net)"
            print "Content-type:", t
            print "Content-length:", len(data)
            print 'SOAPAction: "%s"' % soapaction
            debugFooter(s)
        r.endheaders()
        if config.dumpSOAPOut:
            s = 'Outgoing SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)
        # send the payload
        r.send(data)
        # read response line
        code, msg, headers = r.getreply()
        # Replace the cookie jar with whatever the server set on this response
        self.cookies = Cookie.SimpleCookie();
        if headers:
            content_type = headers.get("content-type","text/xml")
            content_length = headers.get("Content-length")
            for cookie in headers.getallmatchingheaders("Set-Cookie"):
                self.cookies.load(cookie);
        else:
            content_type=None
            content_length=None
        # work around OC4J bug which does '<len>, <len>' for some reason
        if content_length:
            comma=content_length.find(',')
            if comma>0:
                content_length = content_length[:comma]
        # attempt to extract integer message size
        try:
            message_len = int(content_length)
        except:
            message_len = -1
        if message_len < 0:
            # Content-Length missing or invalid; just read the whole socket
            # This won't work with HTTP/1.1 chunked encoding
            data = r.getfile().read()
            message_len = len(data)
        else:
            data = r.getfile().read(message_len)
        if(config.debug):
            print "code=",code
            print "msg=", msg
            print "headers=", headers
            print "content-type=", content_type
            print "data=", data
        if config.dumpHeadersIn:
            s = 'Incoming HTTP headers'
            debugHeader(s)
            if headers.headers:
                print "HTTP/1.? %d %s" % (code, msg)
                print "\n".join(map (lambda x: x.strip(), headers.headers))
            else:
                print "HTTP/0.9 %d %s" % (code, msg)
            debugFooter(s)
        # local helper: cheap prefix check on possibly-None content_type
        def startswith(string, val):
            return string[0:len(val)] == val
        # a 500 without an XML body cannot carry a SOAP fault -> plain HTTP error
        if code == 500 and not \
           ( startswith(content_type, "text/xml") and message_len > 0 ):
            raise HTTPError(code, msg)
        if config.dumpSOAPIn:
            s = 'Incoming SOAP'
            debugHeader(s)
            print data,
            if (len(data)>0) and (data[-1] != '\n'):
                print
            debugFooter(s)
        # 200 is success; 500 may carry a SOAP fault, parsed by the caller
        if code not in (200, 500):
            raise HTTPError(code, msg)
        # get the new namespace
        if namespace is None:
            new_ns = None
        else:
            new_ns = self.getNS(namespace, data)
        # return response payload
        return data, new_ns
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
    """Client-side proxy for a remote SOAP endpoint.

    Attribute access (see __getattr__ and the nested __Method class) turns
    ``proxy.someMethod(args)`` into a SOAP RPC over ``self.transport``.
    """
    def __init__(self, proxy, namespace = None, soapaction = None,
                 header = None, methodattrs = None, transport = HTTPTransport,
                 encoding = 'UTF-8', throw_faults = 1, unwrap_results = None,
                 http_proxy=None, config = Config, noroot = 0,
                 simplify_objects=None, timeout=None):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        # get default values for unwrap_results and simplify_objects
        # from config
        if unwrap_results is None:
            self.unwrap_results=config.unwrap_results
        else:
            self.unwrap_results=unwrap_results

        if simplify_objects is None:
            self.simplify_objects=config.simplify_objects
        else:
            self.simplify_objects=simplify_objects

        # proxy is the endpoint URL; SOAPAddress parses it into host/port/path.
        self.proxy = SOAPAddress(proxy, config)
        self.namespace = namespace
        self.soapaction = soapaction
        self.header = header
        self.methodattrs = methodattrs
        self.transport = transport()
        self.encoding = encoding
        self.throw_faults = throw_faults
        self.http_proxy = http_proxy
        self.config = config
        self.noroot = noroot
        self.timeout = timeout

        # GSI Additions
        if hasattr(config, "channel_mode") and \
               hasattr(config, "delegation_mode"):
            self.channel_mode = config.channel_mode
            self.delegation_mode = config.delegation_mode
        #end GSI Additions

    def invoke(self, method, args):
        """Explicitly invoke remote method ``method`` with positional args."""
        return self.__call(method, args, {})

    def __call(self, name, args, kw, ns = None, sa = None, hd = None,
               ma = None):
        """Perform the RPC: build the envelope, send it, parse the reply.

        ns/sa/hd/ma override the proxy-level namespace, soapaction, header
        and methodattrs.  Tuple values (set through the _ns/_sa/_hd/_ma
        directives of __Method) carry the override in their first element.
        """
        ns = ns or self.namespace
        ma = ma or self.methodattrs

        if sa: # Get soapaction
            if type(sa) == TupleType:
                sa = sa[0]
        else:
            if self.soapaction:
                sa = self.soapaction
            else:
                sa = name

        if hd: # Get header
            if type(hd) == TupleType:
                hd = hd[0]
        else:
            hd = self.header

        hd = hd or self.header

        if ma: # Get methodattrs
            if type(ma) == TupleType: ma = ma[0]
        else:
            ma = self.methodattrs

        ma = ma or self.methodattrs

        m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
                      header = hd, methodattrs = ma, encoding = self.encoding,
                      config = self.config, noroot = self.noroot)

        call_retry = 0
        try:
            r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
                                                    encoding = self.encoding,
                                                    http_proxy = self.http_proxy,
                                                    config = self.config,
                                                    timeout = self.timeout)
        except socket.timeout:
            raise SOAPTimeoutError
        except Exception, ex:
            #
            # Call failed.
            #
            # See if we have a fault handling vector installed in our
            # config. If we do, invoke it. If it returns a true value,
            # retry the call.
            #
            # In any circumstance other than the fault handler returning
            # true, reraise the exception. This keeps the semantics of this
            # code the same as without the faultHandler code.
            #
            if hasattr(self.config, "faultHandler"):
                if callable(self.config.faultHandler):
                    call_retry = self.config.faultHandler(self.proxy, ex)
                    if not call_retry:
                        raise
                else:
                    raise
            else:
                raise

        if call_retry:
            # Single retry, sanctioned by the fault handler above.
            try:
                r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
                                                        encoding = self.encoding,
                                                        http_proxy = self.http_proxy,
                                                        config = self.config,
                                                        timeout = self.timeout)
            except socket.timeout:
                raise SOAPTimeoutError

        p, attrs = parseSOAPRPC(r, attrs = 1)

        try:
            throw_struct = self.throw_faults and \
                isinstance (p, faultType)
        except:
            throw_struct = 0

        if throw_struct:
            if Config.debug:
                print p
            raise p

        # If unwrap_results=1 and there is only element in the struct,
        # SOAPProxy will assume that this element is the result
        # and return it rather than the struct containing it.
        # Otherwise SOAPproxy will return the struct with all the
        # elements as attributes.
        if self.unwrap_results:
            try:
                count = 0
                for i in p.__dict__.keys():
                    if i[0] != "_": # don't count the private stuff
                        count += 1
                        t = getattr(p, i)
                if count == 1: # Only one piece of data, bubble it up
                    p = t
            except:
                pass

        # Automatically simplfy SOAP complex types into the
        # corresponding python types. (structType --> dict,
        # arrayType --> array, etc.)
        if self.simplify_objects:
            p = simplify(p)

        if self.config.returnAllAttrs:
            return p, attrs
        return p

    def _callWithBody(self, body):
        """Send a pre-built body (no method wrapper) through __call."""
        return self.__call(None, body, {})

    def __getattr__(self, name): # hook to catch method calls
        # Refuse the pickling/copy protocol names so those operations fail
        # loudly instead of yielding a bogus remote-method object.
        if name in ( '__del__', '__getinitargs__', '__getnewargs__',
                     '__getstate__', '__setstate__', '__reduce__', '__reduce_ex__'):
            raise AttributeError, name
        return self.__Method(self.__call, name, config = self.config)

    # To handle attribute weirdness
    class __Method:
        # Some magic to bind a SOAP method to an RPC server.
        # Supports "nested" methods (e.g. examples.getStateName) -- concept
        # borrowed from xmlrpc/soaplib -- www.pythonware.com
        # Altered (improved?) to let you inline namespaces on a per call
        # basis ala SOAP::LITE -- www.soaplite.com
        def __init__(self, call, name, ns = None, sa = None, hd = None,
                     ma = None, config = Config):
            self.__call = call
            self.__name = name
            self.__ns = ns
            self.__sa = sa
            self.__hd = hd
            self.__ma = ma
            self.__config = config
            return

        def __call__(self, *args, **kw):
            # Names starting with '_' are directives (_ns/_sa/_hd/_ma) or
            # repr requests, not remote methods.
            if self.__name[0] == "_":
                if self.__name in ["__repr__","__str__"]:
                    return self.__repr__()
                else:
                    return self.__f_call(*args, **kw)
            else:
                return self.__r_call(*args, **kw)

        def __getattr__(self, name):
            if name == '__del__':
                raise AttributeError, name
            if self.__name[0] == "_":
                # Don't nest method if it is a directive
                return self.__class__(self.__call, name, self.__ns,
                                      self.__sa, self.__hd, self.__ma)
            return self.__class__(self.__call, "%s.%s" % (self.__name, name),
                                  self.__ns, self.__sa, self.__hd, self.__ma)

        def __f_call(self, *args, **kw):
            # Directive call: stash the override tuple and return self so the
            # chained remote-method call picks it up.
            if self.__name == "_ns": self.__ns = args
            elif self.__name == "_sa": self.__sa = args
            elif self.__name == "_hd": self.__hd = args
            elif self.__name == "_ma": self.__ma = args
            return self

        def __r_call(self, *args, **kw):
            # Remote call: delegate to SOAPProxy.__call with any overrides.
            return self.__call(self.__name, args, kw, self.__ns, self.__sa,
                               self.__hd, self.__ma)

        def __repr__(self):
            return "<%s at %d>" % (self.__class__, id(self))
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
# Raw string: '\-' is an invalid escape sequence in a plain string literal.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
# Values substituted for gyp's generic variables when generating MSVS
# projects; the $(...) forms are macros expanded by Visual Studio itself.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',

    # TODO(jeanluc)  The way we currently generate libraries makes Visual
    # Studio 2010 unhappy.  We get a lot of warnings like:
    #   warning MSB8012: TargetPath(...\Debug\gles2_c_lib.lib) does not match
    #   the Library's OutputFile property value (...\Debug\lib\gles2_c_lib.lib).
    #   This may cause your project to build incorrectly.  To correct this,
    #   please make sure that $(OutDir), $(TargetName) and $(TargetExt) property
    #   values match the value specified in %(Lib.OutputFile).
    # Despite the warnings, this compile correctly.  It would be nice to get rid
    # of the warnings.

    # TODO(jeanluc)  I had:  'LIB_DIR': '$(OutDir)lib',
    'LIB_DIR': '$(OutDir)/lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
# (values in these sections get path fixup applied by gyp's common code).
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]
# Keys that apply to the target as a whole rather than per-configuration.
generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_shard',
]
# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]
# Process-wide cache for _GetDomainAndUserName(): shelling out to
# 'net config Workstation' is slow, so the answer is computed once.
cached_username = None
cached_domain = None

# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
# Prefix prepended to relative paths by _FixPath; the generator sets this
# while writing a project so sources are expressed relative to the vcproj.
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
  """Convert a path to the backslash form expected in a vcproj file.

  Arguments:
    path: the path to convert, may contain '/' etc.
  Returns:
    The path with forward slashes turned into backslashes, normalized, and
    without a trailing backslash.
  """
  # Relative, non-macro paths get re-rooted at the current project dir.
  if fixpath_prefix and path and not os.path.isabs(path) and path[0] != '$':
    path = os.path.join(fixpath_prefix, path)
  path = _NormalizedSource(path.replace('/', '\\'))
  if path.endswith('\\'):
    path = path[:-1]
  return path
def _FixPaths(paths):
  """Apply _FixPath to every element of paths, returning a new list."""
  return [_FixPath(path) for path in paths]
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True):
  """Converts a list split source file paths into a vcproj folder hierarchy.

  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files.
    list_excluded: When true, excluded files get their own '_excluded_files'
        filter folder.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  if excluded is None:
    # Previously 'filename in excluded' raised TypeError when the default
    # None was used; treat it as "nothing excluded".
    excluded = set()
  result = []
  excluded_result = []
  folders = dict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      filename = _NormalizedSource('\\'.join(prefix + s))
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    else:
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)
  # Populate all the folders.
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
  """Return the MSVS 'ConfigBase|Platform' name for a configuration."""
  platform = _ConfigPlatform(config_data)
  base = _ConfigBaseName(config_name, platform)
  return '%s|%s' % (base, platform)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd):
  """Convert a gyp action argument list into a vcproj command string.

  Arguments:
    spec: the target dict (read for 'msvs_cygwin_dirs').
    cmd: the command as a list of arguments; may contain $(InputDir) etc.
    cygwin_shell: truthy to run the command under cygwin's bash.
    has_input_path: whether $(InputPath) may appear in the command.
    quote_cmd: whether to quote each argument (non-cygwin branch only).
  Returns:
    A single batch command string suitable for a CustomBuild CommandLine.
  """
  # %INPUTDIR% mirrors $(InputDir) relative to the project directory, minus
  # the trailing separator, for use in the non-cygwin branch below.
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
      'set INPUTDIR=$(InputDir)\n'
      'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
      'set INPUTDIR=%INPUTDIR:~0,-1%\n'
      )
  else:
    input_dir_preamble = ''

  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command.
    # Rewrite VS macros to cygpath-converted shell variables so the command
    # sees Unix-style paths inside bash.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    direct_cmd = ['"%s"' % i for i in direct_cmd]
    direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
    #direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote): regularize quoting path names throughout the module
    # Only export the environment variables the command actually references.
    cmd = (
        'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
        'set CYGWIN=nontsec&& ')
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Fix the paths
    # If the argument starts with a slash, it's probably a command line switch
    arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)','%INPUTDIR%') for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote): regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path):
  """Build the final command line for a single rule.

  Arguments:
    spec: the target dict.
    rule: the rule dict; honors 'msvs_cygwin_shell' and 'msvs_quote_cmd'.
    has_input_path: whether $(InputPath) may appear in the command.
  Returns:
    The command string from _BuildCommandLineForRuleRaw.
  """
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  mcs = rule.get('msvs_cygwin_shell')
  if mcs is None:
    mcs = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(mcs, str):
    mcs = int(mcs)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  # NOTE: a dead 'cygwin_dir = _FixPath(...)' computation was removed here;
  # _BuildCommandLineForRuleRaw derives the cygwin dir itself.
  return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
                                     quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Add a custom build tool to execute something.

  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  inputs = _FixPaths(inputs)
  outputs = _FixPaths(outputs)
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(inputs),
       'Outputs': ';'.join(outputs),
       'CommandLine': cmd,
      })
  # Add to the properties of primary input for each config.
  # The same tool is attached per-configuration so the step runs in every
  # build flavor of the project.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(_FixPath(primary_input),
                    _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Add actions accumulated into an actions_dict, merging as needed.

  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  """
  for primary_input, actions in actions_dict.items():
    # Collapse every action sharing this input into one custom build step.
    inputs = set()
    outputs = set()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(action['inputs'])
      outputs.update(action['outputs'])
      descriptions.append(action['description'])
      commands.append(action['command'])
    _AddCustomBuildToolForMSVS(p, spec,
                               primary_input=primary_input,
                               inputs=inputs,
                               outputs=outputs,
                               description=', and also '.join(descriptions),
                               cmd='\r\n'.join(commands))
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule.
  """
  inputs = set([trigger_file])
  outputs = set()
  # Expand the $(Input*) macros against the trigger file for both lists.
  for raw in _FixPaths(rule.get('inputs', [])):
    inputs.add(_RuleExpandPath(raw, trigger_file))
  for raw in _FixPaths(rule.get('outputs', [])):
    outputs.add(_RuleExpandPath(raw, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native rules file.

  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'],
                                   options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  # Add each rule.
  for r in rules:
    rule_name = r['rule_name']
    rule_ext = r['extension']
    inputs = _FixPaths(r.get('inputs', []))
    outputs = _FixPaths(r.get('outputs', []))
    cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
    rules_file.AddCustomBuildRule(name=rule_name,
                                  description=r.get('message', rule_name),
                                  extensions=[rule_ext],
                                  additional_dependencies=inputs,
                                  outputs=outputs,
                                  cmd=cmd)
  # Write out rules file.
  # WriteIfChanged avoids touching the file (and triggering rebuilds) when
  # the content is identical.
  rules_file.WriteIfChanged()

  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.

  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = set()
  all_outputs = set()
  all_output_dirs = set()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(set(inputs))
      all_outputs.update(set(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()

  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running %s' % cmd,
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
# Runs of list delimiters (comma/semicolon) plus any backslashes that
# immediately precede them.
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')


def _EscapeVCProjCommandLineArgListItem(s):
  """Escapes command line arguments for MSVS.

  The VCProj format stores string lists in a single string using commas and
  semi-colons as separators, which must be quoted if they are to be
  interpreted literally. However, command-line arguments may already have
  quotes, and the VCProj parser is ignorant of the backslash escaping
  convention used by CommandLineToArgv, so the command-line quotes and the
  VCProj quotes may not be the same quotes. So to store a general
  command-line argument in a VCProj list, we need to parse the existing
  quoting according to VCProj's convention and quote any delimiters that are
  not already quoted by that convention. The quotes that we add will also be
  seen by CommandLineToArgv, so if backslashes precede them then we also have
  to escape those backslashes according to the CommandLineToArgv
  convention.

  Args:
    s: the string to be escaped.
  Returns:
    the escaped string.
  """

  def _Replace(match):
    # For a non-literal quote, CommandLineToArgv requires an even number of
    # backslashes preceding it, and it produces half as many literal
    # backslashes. So we need to produce 2n backslashes.
    return 2 * match.group(1) + '"' + match.group(2) + '"'

  segments = s.split('"')
  # The unquoted segments are at the even-numbered indices.
  for i in range(0, len(segments), 2):
    segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
  # Concatenate back into a single string
  s = '"'.join(segments)
  if len(segments) % 2 == 0:
    # String ends while still quoted according to VCProj's convention. This
    # means the delimiter and the next list item that follow this one in the
    # .vcproj file will be misinterpreted as part of this item. There is nothing
    # we can do about this. Adding an extra quote would correct the problem in
    # the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does works, but that's only possible if
    # there's only one such item. Let's just warn the user.
    print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                          'quotes in ' + s)
  return s
def _EscapeCppDefineForMSVS(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Layer the three escaping conventions innermost-first: env expansion,
  # then CommandLineToArgv, then the VCProj list format.
  return _EscapeVCProjCommandLineArgListItem(
      _EscapeCommandLineArgumentForMSVS(
          _EscapeEnvironmentVariableExpansion(s)))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1))/2*4)*'\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Same layering as the MSVS variant, with MSBuild's special characters
  # percent-encoded as the outermost step.
  return _EscapeMSBuildSpecialCharacters(
      _EscapeCommandLineArgumentForMSBuild(
          _EscapeEnvironmentVariableExpansion(s)))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.

  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  rules = spec.get('rules', [])
  # Partition rules into native (.rules file) and external (makefile-based).
  rules_native = []
  rules_external = []
  for rule in rules:
    if int(rule.get('msvs_external_rule', 0)):
      rules_external.append(rule)
    else:
      rules_native.append(rule)
  if rules_native:
    _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
  """Fold rule outputs into sources and move rule inputs to the excluded set.

  Only rules with 'process_outputs_as_sources' set are considered.
  """
  for rule in rules:
    if not int(rule.get('process_outputs_as_sources', False)):
      continue
    # Add in the outputs from this rule.
    for trigger_file in _FindRuleTriggerFiles(rule, sources):
      inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
      inputs = set(_FixPaths(inputs))
      outputs = set(_FixPaths(outputs))
      # The trigger itself stays a plain source; extra inputs are tracked
      # but excluded from normal compilation.
      inputs.remove(_FixPath(trigger_file))
      sources.update(inputs)
      excluded_sources.update(inputs)
      sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  must_keep = set(_FixPaths(actions_to_add.keys()))
  return [source for source in excluded_sources if source not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # Pluck out the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  # Decide the guid of the project.
  guid = default_config.get('msvs_guid')
  if guid:
    if VALID_MSVS_GUID_CHARS.match(guid) is None:
      raise ValueError('Invalid MSVS guid: "%s".  Must match regex: "%s".' %
                       (guid, VALID_MSVS_GUID_CHARS.pattern))
    return '{%s}' % guid
  # No explicit guid: derive a deterministic one from the project path.
  return MSVSNew.MakeGuid(proj_path)
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  """
  default_config = _GetDefaultConfiguration(project.spec)
  # A user-supplied project file means nothing should be emitted here.
  if default_config.get('msvs_existing_vcproj'):
    return
  # VS2010+ uses the MSBuild (.vcxproj) format; older versions use .vcproj.
  if version.UsesVcxproj():
    _GenerateMSBuildProject(project, options, version, generator_flags)
  else:
    _GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file. It may create .rules and .user files too.

  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  """
  spec = project.spec
  # Ensure the output directory exists before the writer opens the file.
  vcproj_dir = os.path.dirname(project.path)
  if vcproj_dir and not os.path.exists(vcproj_dir):
    os.makedirs(vcproj_dir)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)
  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
  # Add rules. This may grow/shrink the source lists via rule inputs/outputs.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  # Converts the source sets into the filter (folder) hierarchy and lists.
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(
          spec, options, project_dir, sources, excluded_sources, list_excluded))
  # Add in files.
  _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)
  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)
  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
  # Write it out (only touches disk if the content changed).
  p.WriteIfChanged()
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform names used by the spec's configurations.
  """
  # Fix: the old docstring wrongly claimed this returns "The MSVSUserFile
  # object created" (copied from _CreateMSVSUserFile).
  # Gather the platform of every configuration; the set collapses duplicates.
  platforms = set()
  for config in spec['configurations'].itervalues():
    platforms.add(_ConfigPlatform(config))
  return list(platforms)
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created. The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  domain, username = _GetDomainAndUserName()
  # The .user file name embeds domain and user so it is per-developer.
  vcuser_filename = '%s.%s.%s.user' % (proj_path, domain, username)
  return MSVSUserFile.Writer(vcuser_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
  """Returns the configuration type for this project.

  It's a number defined by Microsoft. May raise an exception.

  Args:
    spec: The target dictionary containing the properties of the target.
    build_file: The path of the gyp file.
  Returns:
    A string holding the MSVS configuration type number, e.g. '1'.
    (Fix: the old docstring claimed an integer, but the values are strings.)
  Raises:
    Exception: if the target type is missing or unrecognized.
  """
  try:
    config_type = {
        'executable': '1',  # .exe
        'shared_library': '2',  # .dll
        'loadable_module': '2',  # .dll
        'static_library': '4',  # .lib
        'none': '10',  # Utility type
        }[spec['type']]
  except KeyError:
    # Distinguish a bogus type from a missing one for a clearer message.
    if spec.get('type'):
      raise Exception('Target type %s is not a valid target type for '
                      'target %s in %s.' %
                      (spec['type'], spec['target_name'], build_file))
    else:
      raise Exception('Missing type field for target %s in %s.' %
                      (spec['target_name'], build_file))
  return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.

  Many settings in a vcproj file are specific to a configuration. This
  function the main part of the vcproj file that's configuration specific.

  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionnary that defines the special processing to be done
            for this configuration.
  """
  # Get the information for this configuration
  include_dirs, resource_include_dirs = _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec)
  defines = _GetDefines(config)
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')
  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)
  # User settings go in first so later _ToolAppend calls merge onto them.
  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  if out_file:
    # only_if_unset: an explicit OutputFile in msvs_settings wins.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)\\$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    # '2' = use precompiled headers (MSVS UsePrecompiledHeader enum value).
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionnary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of directory paths.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  raw_include_dirs = (config.get('include_dirs', []) +
                      config.get('msvs_system_include_dirs', []))
  # Resource includes default to the regular include dirs.
  raw_resource_dirs = config.get('resource_include_dirs', raw_include_dirs)
  return _FixPaths(raw_include_dirs), _FixPaths(raw_resource_dirs)
def _GetLibraries(spec):
  """Returns the list of libraries for this configuration.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of directory paths.
  """
  # Strip out -l, as it is not used on windows (but is needed so we can pass
  # in libraries that are assumed to be in the default library path).
  # Walk backwards so only the LAST occurrence of a duplicate is kept,
  # then reverse again at the end to restore the original ordering.
  seen = set()
  deduped = []
  for entry in reversed(spec.get('libraries', [])):
    library = re.sub(r'^-l', '', entry)
    if library not in seen:
      seen.add(library)
      deduped.append(library)
  deduped.reverse()
  return deduped
def _GetOutputFilePathAndTool(spec):
  """Returns the path and tool to use for this target.

  Figures out the path of the file this spec will create and the name of
  the VC tool that will create it.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    A triple of (file path, name of the vc tool, name of the msbuild tool)
  """
  output_file_map = {
      'executable': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.exe'),
      'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
      'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
      # TODO(jeanluc) If we want to avoid the MSB8012 warnings in
      # VisualStudio 2010, we will have to change the value of $(OutDir)
      # to contain the \lib suffix, rather than doing it as below.
      'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)\\lib\\', '.lib'),
  }
  props = output_file_map.get(spec['type'])
  # Unknown types ('none') or explicit opt-out produce no output file.
  if not props or not int(spec.get('msvs_auto_output_file', 1)):
    return '', '', ''
  vc_tool, msbuild_tool, out_dir, suffix = props
  out_dir = spec.get('product_dir', out_dir)
  product_extension = spec.get('product_extension')
  if product_extension:
    suffix = '.' + product_extension
  prefix = spec.get('product_prefix', '')
  product_name = spec.get('product_name', '$(ProjectName)')
  out_file = ntpath.join(out_dir, prefix + product_name + suffix)
  return out_file, vc_tool, msbuild_tool
def _GetDefines(config):
  """Returns the list of preprocessor definitions for this configuation.

  Arguments:
    config: The dictionnary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of preprocessor definitions.
  """
  formatted = []
  for define in config.get('defines', []):
    # A list entry ['NAME', value] is rendered as 'NAME=value'.
    if type(define) == list:
      formatted.append('='.join(str(part) for part in define))
    else:
      formatted.append(str(define))
  return formatted
def _GetDisabledWarnings(config):
  """Returns msvs_disabled_warnings as strings, as MSVS expects."""
  warnings = config.get('msvs_disabled_warnings', [])
  return [str(warning) for warning in warnings]
def _GetModuleDefinition(spec):
  """Returns the single .def file for a dll-like target, or ''.

  Raises:
    ValueError: if the target lists more than one .def file.
  """
  # Only dll-producing targets can carry a module definition file.
  if spec['type'] not in ('shared_library', 'loadable_module'):
    return ''
  def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
  if len(def_files) > 1:
    raise ValueError(
        'Multiple module definition files in one target, target %s lists '
        'multiple .def files: %s' % (
            spec['target_name'], ' '.join(def_files)))
  if def_files:
    return _FixPath(def_files[0])
  return ''
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionnary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  for tool, settings in tools.iteritems():
    # Collapse list-valued settings into a single delimited string.
    settings_fixed = {}
    for setting, value in settings.iteritems():
      if type(value) != list:
        settings_fixed[setting] = value
        continue
      # Linker dependencies and AdditionalOptions are space separated;
      # every other list setting uses the usual MSVS semicolon separator.
      space_separated = (setting == 'AdditionalOptions' or
                         (tool == 'VCLinkerTool' and
                          setting == 'AdditionalDependencies'))
      separator = ' ' if space_separated else ';'
      settings_fixed[setting] = separator.join(value)
    tool_list.append(MSVSProject.Tool(tool, settings_fixed))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionnary of settings; the tool name is the key.
    config: The dictionnary that defines the special processing to be done
            for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  p.AddConfig(_ConfigFullName(config_name, config),
              attrs=_GetMSVSAttributes(spec, config, config_type),
              tools=_ConvertToolsToExpectedForm(tools))
def _GetMSVSAttributes(spec, config, config_type):
  """Builds the configuration attribute dict for a vcproj configuration."""
  # Start from the user-specified attributes.
  prepared_attrs = dict(config.get('msvs_configuration_attributes', {}))
  # Add props files.
  vsprops_dirs = _FixPaths(config.get('msvs_props', []))
  if vsprops_dirs:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
  # Set configuration type.
  prepared_attrs['ConfigurationType'] = config_type
  # TODO(jeanluc) If we want to avoid the MSB8012 warning, we should
  # add code like the following to place libraries in their own directory.
  # if config_type == '4':
  #   output_dir = spec.get('product_dir', output_dir + '\\lib')
  prepared_attrs['OutputDirectory'] = prepared_attrs.get(
      'OutputDirectory', '$(SolutionDir)$(ConfigurationName)')
  # Keep intermediates per-project to avoid collisions between targets.
  prepared_attrs.setdefault('IntermediateDirectory',
                            '$(ConfigurationName)\\obj\\$(ProjectName)')
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  """Normalizes each path in sources_array and adds it to sources_set."""
  sources_set.update(_NormalizedSource(source) for source in sources_array)
def _PrepareListOfSources(spec, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = set()
  excluded_sources = set()
  _AddNormalizedSources(sources, spec.get('sources', []))
  # The gyp file itself is a source so editing it retriggers a build.
  sources.add(gyp_file)
  for action in spec.get('actions', []):
    # Action inputs are tracked in the project but never compiled, so
    # they land in both sets.
    action_inputs = set(_NormalizedSource(i)
                        for i in action.get('inputs', []))
    sources.update(action_inputs)
    excluded_sources.update(action_inputs)
    if int(action.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, action.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files should be listed in the project.
  Returns:
    A trio of (list of sources, list of excluded sources,
               path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(set(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded)
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
  """Returns the .idl files to exclude when an external rule handles 'idl'."""
  # If any non-native rules use 'idl' as an extension exclude idl files.
  uses_external_idl_rule = any(
      rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0))
      for rule in spec.get('rules', []))
  if not uses_external_idl_rule:
    return []
  return [source for source in sources if source.endswith('.idl')]
def _GetPrecompileRelatedFiles(spec):
  """Gathers sources referenced by any configuration's precompile settings."""
  precompiled_related = []
  for _, config in spec['configurations'].iteritems():
    for key in precomp_keys:
      value = config.get(key)
      if value:
        precompiled_related.append(_FixPath(value))
  return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Marks excluded sources as ExcludedFromBuild in the project."""
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  num_configs = len(spec['configurations'])
  for file_name, excluded_configs in exclusions.iteritems():
    # If we're not listing excluded files, a file excluded from every
    # configuration won't appear in the project at all, so there is
    # nothing to annotate.
    if not list_excluded and len(excluded_configs) == num_configs:
      continue
    for config_name, config in excluded_configs:
      p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                      {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  """Maps each excluded file to the configurations it is excluded from."""
  exclusions = {}
  # Exclude excluded sources from being built.
  for f in excluded_sources:
    excluded_configs = []
    for config_name, config in spec['configurations'].iteritems():
      # Precompiled-header-related files must stay buildable in any
      # configuration that references them.
      precomped = [_FixPath(config.get(key, '')) for key in precomp_keys]
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  # idl files handled by a non-native (external) rule are excluded from
  # every configuration.
  for f in excluded_idl:
    exclusions[f] = list(spec['configurations'].iteritems())
  return exclusions
def _AddToolFilesToMSVS(p, spec):
  """Registers every msvs_tool_files entry (rules files) with the project."""
  tool_files = set()
  for _, config in spec['configurations'].iteritems():
    tool_files.update(config.get('msvs_tool_files', []))
  for tool_file in tool_files:
    p.AddToolFile(tool_file)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configures per-file precompiled header settings in the project.

  Arguments:
    p: The target project being generated.
    sources: The hierarchical list of project sources (may contain Filters).
    spec: The target dictionary containing the properties of the target.
  """
  # Pre-compiled header source stubs need a different compiler flag
  # (generate precompiled header) and any source file not of the same
  # kind (i.e. C vs. C++) as the precompiled header source stub needs
  # to have use of precompiled headers disabled.
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      # NOTE: this is overwritten on each config iteration, so the last
      # configuration with a precompiled source determines which extensions
      # get precompiled headers disabled below.
      basename, extension = os.path.splitext(source)
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']
  def DisableForSourceTree(source_tree):
    # Recursively walk the source hierarchy (Filters contain sub-trees)
    # and disable precompiled headers on mismatched-language files.
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            # '0' = don't use precompiled headers; $(NOINHERIT) drops any
            # inherited forced-include of the precompiled header.
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])
  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Queues every 'actions' entry of the spec as a custom build step."""
  for action in spec.get('actions', []):
    command = _BuildCommandLineForRule(spec, action, has_input_path=False)
    # Attach actions to the gyp file if nothing else is there.
    _AddActionStep(actions_to_add,
                   inputs=action.get('inputs') or [relative_path_of_gyp_file],
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=command)
def _WriteMSVSUserFile(project_path, version, spec):
  """Writes a .user file with debug settings for run_as/test targets."""
  # Add run_as and test targets.
  if 'run_as' in spec:
    run_as = spec['run_as']
    settings = (run_as.get('action', []),
                run_as.get('environment', []),
                run_as.get('working_directory', '.'))
  elif int(spec.get('test', 0)):
    # Test targets get a default gtest invocation.
    settings = (['$(TargetPath)', '--gtest_print_time'], [], '.')
  else:
    return  # Nothing to add
  action, environment, working_directory = settings
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Queues each 'copies' step of the spec as a custom build action."""
  for inputs, outputs, command, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=command)
def _GetCopies(spec):
  """Expands 'copies' entries into (inputs, outputs, cmd, description)."""
  copies = []
  for cpy in spec.get('copies', []):
    for src in cpy.get('files', []):
      dst = os.path.join(cpy['destination'], os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # Directory copy: xcopy the whole tree into the destination.
        src_bare = src[:-1]
        base_dir, outer_dir = posixpath.split(src_bare)
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single-file copy; mkdir may fail when the directory already
        # exists, so the ERRORLEVEL is reset before the copy runs.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
  """Returns (creating as needed) the nested dict in |root| for |path|."""
  # |path| will eventually be empty (in the recursive calls) if it was
  # initially relative; otherwise it will eventually end up as '\', 'D:\',
  # etc., which also terminates the recursion.
  if not path or path.endswith(os.sep):
    return root
  parent, folder = os.path.split(path)
  return _GetPathDict(root, parent).setdefault(folder, {})
def _DictsToFolders(base_path, bucket, flat):
  """Recursively converts the nested path dicts into solution entries."""
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) != dict:
      # A leaf: an actual project object.
      children.append(contents)
      continue
    sub_children = _DictsToFolders(os.path.join(base_path, folder),
                                   contents, flat)
    if flat:
      # Flat solutions hoist all projects to the top level.
      children += sub_children
    else:
      children.append(MSVSNew.MSVSFolder(os.path.join(base_path, folder),
                                         name='(' + folder + ')',
                                         entries=sub_children))
  return children
def _CollapseSingles(parent, node):
  """Hoists a project that is the sole entry of a same-named folder.

  Recursively explores the tree of dicts looking for projects which are
  the sole item in a folder which has the same name as the project; such
  projects are brought up one level.
  """
  if type(node) != dict:
    return node
  # list(node)[0] is the Python-2 node.keys()[0] written portably.
  if len(node) == 1 and list(node)[0] == parent + '.vcproj':
    return node[list(node)[0]]
  for child in node:
    node[child] = _CollapseSingles(child, node[child])
  return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Builds the folder hierarchy (or flat list) of projects for a solution.

  Arguments:
    sln_projects: list of qualified targets that belong in the solution.
    project_objects: dict mapping qualified target to its project object.
    flat: if true, produce a flat list instead of nested folders.
  Returns:
    The list of solution entries (folders and/or projects).
  """
  root = {}
  # Convert into a tree of dicts on path.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    gyp_dir = os.path.dirname(gyp_file)
    path_dict = _GetPathDict(root, gyp_dir)
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one entry.
  # In practice, this strips the top-level "src/" dir from the hierarchy in
  # the solution.
  # NOTE: root.keys()[0] relies on Python 2's dict.keys() returning a list.
  while len(root) == 1 and type(root[root.keys()[0]]) == dict:
    root = root[root.keys()[0]]
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Computes the project file path plus the prefix needed to fix paths.

  Returns:
    A (proj_path, fix_prefix) pair; fix_prefix is None unless the project
    file is relocated via options.generator_output.
  """
  default_config = _GetDefaultConfiguration(spec)
  # A user-supplied project file keeps its own name.
  proj_filename = default_config.get('msvs_existing_vcproj')
  if not proj_filename:
    proj_filename = ''.join([spec['target_name'], options.suffix,
                             msvs_version.ProjectExtension()])
  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
  fix_prefix = None
  if options.generator_output:
    # The project moves under generator_output, so relative paths inside
    # it must be rewritten to point back at the original location.
    project_dir_path = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(project_dir_path,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
  """Maps each solution configuration to the project configuration to build.

  Honors a per-configuration msvs_target_platform override.
  """
  config_platform_overrides = {}
  for config_name, config in spec['configurations'].iteritems():
    platform = config.get('msvs_target_platform', _ConfigPlatform(config))
    fixed = '%s|%s' % (_ConfigBaseName(config_name, _ConfigPlatform(config)),
                       platform)
    config_platform_overrides[_ConfigFullName(config_name, config)] = fixed
  return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  """
  # _GetPathOfProject's result feeds the module-level fixpath_prefix used
  # by _FixPath while each project's paths are computed.
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        _FixPath(proj_path),
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    projects[qualified_target] = obj
  # Set all the dependencies, now that every project object exists.
  for project in projects.values():
    deps = project.spec.get('dependencies', [])
    deps = [projects[d] for d in deps]
    project.set_dependencies(deps)
  return projects
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version
  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  wow64_arch = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  is_64bit = '64' in arch or '64' in wow64_arch
  default_variables['MSVS_OS_BITS'] = 64 if is_64bit else 32
def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  # Split on the LAST '#' only, mirroring rsplit('#', 1).
  base, sep, toolset = name.rpartition('#')
  if not sep:
    return '%s_%d' % (name, number)
  return '%s_%d#%s' % (base, number, toolset)
def _ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Gather the targets to shard, and how many pieces.
  targets_to_shard = {}
  for t in target_dicts:
    shards = int(target_dicts[t].get('msvs_shard', 0))
    if shards:
      targets_to_shard[t] = shards
  # Shard target_list: each sharded target is replaced by N renamed copies.
  new_target_list = []
  for t in target_list:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        new_target_list.append(_ShardName(t, i))
    else:
      new_target_list.append(t)
  # Shard target_dict.
  new_target_dicts = {}
  for t in target_dicts:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        name = _ShardName(t, i)
        # Shallow copy: shards share nested values except where replaced.
        new_target_dicts[name] = copy.copy(target_dicts[t])
        new_target_dicts[name]['target_name'] = _ShardName(
            new_target_dicts[name]['target_name'], i)
        sources = new_target_dicts[name].get('sources', [])
        new_sources = []
        # Round-robin: shard i takes every shard-count-th source from i.
        for pos in range(i, len(sources), targets_to_shard[t]):
          new_sources.append(sources[pos])
        new_target_dicts[name]['sources'] = new_sources
    else:
      new_target_dicts[t] = target_dicts[t]
  # Shard dependencies: anything depending on a sharded target now
  # depends on all of its shards.
  for t in new_target_dicts:
    dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
    new_dependencies = []
    for d in dependencies:
      if d in targets_to_shard:
        for i in range(targets_to_shard[d]):
          new_dependencies.append(_ShardName(d, i))
      else:
        new_dependencies.append(d)
    new_target_dicts[t]['dependencies'] = new_dependencies
  return (new_target_list, new_target_dicts)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Dictionary of global generator parameters.
  """
  # fixpath_prefix is module-level state consumed by _FixPath while each
  # project is generated.
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  for project in project_objects.values():
    fixpath_prefix = project.fixpath_prefix
    _GenerateProject(project, options, msvs_version, generator_flags)
  fixpath_prefix = None
  # One solution per .gyp file in the input data.
  for build_file in data:
    # Validate build_file extension
    if build_file[-4:] != '.gyp':
      continue
    sln_path = build_file[:-4] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                extension_to_rule_name):
  """Write (or remove) the .filters file for a project.

  Visual Studio uses this file to organize the presentation of source files
  into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filters = []
  file_entries = []
  _AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
                           filters, file_entries)
  if not filters:
    # No folders to describe; a stale filters file would be misleading.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  document = [
      'Project',
      {'ToolsVersion': '4.0',
       'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
      },
      ['ItemGroup'] + filters,
      ['ItemGroup'] + file_entries,
  ]
  easy_xml.WriteXmlIfChanged(document, filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Accumulates the filter and source entries for the .filters file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for item in sources:
    if not isinstance(item, MSVSProject.Filter):
      # A plain source file: record it under its MSBuild element type.
      _, element = _MapFileToMsBuildSourceType(item, extension_to_rule_name)
      entry = [element, {'Include': item}]
      if parent_filter_name:
        # Remember which folder the file is displayed under.
        entry.append(['Filter', parent_filter_name])
      source_group.append(entry)
      continue
    # A sub-filter: build its backslash-separated display path.
    if parent_filter_name:
      filter_name = '%s\\%s' % (parent_filter_name, item.name)
    else:
      filter_name = item.name
    filter_group.append(
        ['Filter', {'Include': filter_name},
         ['UniqueIdentifier', MSVSNew.MakeGuid(item.name)]])
    # Recurse to pick up the filter's children.
    _AppendFiltersForMSBuild(filter_name, item.contents,
                             extension_to_rule_name,
                             filter_group, source_group)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, extension_to_rule_name):
  """Emits the MSBuild machinery for this target's custom rules.

  MSBuild rules are implemented using three files: an XML file, a .targets
  file and a .props file.
  See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  for more details.
  """
  all_rules = spec.get('rules', [])
  native_rules = []
  external_rules = []
  # Rules flagged 'msvs_external_rule' are run as external actions instead
  # of native MSBuild rules.
  for rule in all_rules:
    if int(rule.get('msvs_external_rule', 0)):
      external_rules.append(rule)
    else:
      native_rules.append(rule)
  msbuild_rules = []
  for rule in native_rules:
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    xml_name = base + '.xml'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    _GenerateMSBuildRulePropsFile(os.path.join(output_dir, props_name),
                                  msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(os.path.join(output_dir, targets_name),
                                    msbuild_rules)
    _GenerateMSBuildRuleXmlFile(os.path.join(output_dir, xml_name),
                                msbuild_rules)
  if external_rules:
    _GenerateExternalRules(external_rules, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(all_rules, sources, excluded_sources)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    display_name: The original (unsanitized) rule name from the gyp file.
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """
  def __init__(self, rule, spec):
    """Derives the MSBuild names and converted values from a gyp rule dict.

    Arguments:
      rule: the 'rules' entry dict from the gyp file.
      spec: the target project dict (used to build the command line).
    """
    self.display_name = rule['rule_name']
    # Assure that the rule name is only characters and numbers
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion.  I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.tlog = self.rule_name + '_tlog'
    # Normalize the extension to always carry a leading dot.
    self.extension = rule['extension']
    if not self.extension.startswith('.'):
      self.extension = '.' + self.extension

    # Convert $(VC macro) references to their MSBuild equivalents.
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    old_additional_dependencies = _FixPaths(rule.get('inputs', []))
    self.additional_dependencies = (
        ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                  for i in old_additional_dependencies]))
    old_outputs = _FixPaths(rule.get('outputs', []))
    self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                             for i in old_outputs])
    old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True)
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file.

  For each rule this emits default BeforeTargets/AfterTargets hooks, the
  DependsOn property, and the ItemDefinitionGroup carrying the command line,
  outputs, description and extra dependencies.

  Arguments:
    props_path: the path of the .props file to write.
    msbuild_rules: the list of MSBuildRule objects to emit.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        # Only provide default hooks when the user has not set them and the
        # project is not a Makefile-style project.
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
          "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                    rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file.

  For each rule this emits the Target that executes the rule's command line
  over the matching inputs, plus a Compute<rule>Output target that declares
  the rule outputs so incremental linking and MakeDir work.

  Arguments:
    targets_path: the path of the .targets file to write.
    msbuild_rules: the list of MSBuildRule objects to emit.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    # '%%' escapes Python's format character; the emitted text is MSBuild
    # item metadata of the form %(Item.Metadata).
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When the user builds only selected files, drop non-selected items.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
      ]
    # Tracker log entries so incremental builds know what was produced.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
        ],
      ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
      ]
    write_lines_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
      ]
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
      ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         logging_section,
         message_section,
         write_lines_section,
         command_and_input_section,
        ],
        # Hook the output-computation target into link and lib input lists.
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          # Feed produced .obj/.res/.rsc/.lib files to the linker/librarian.
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         # Ensure the output directories exist before the rule runs.
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
      ])
  easy_xml.WriteXmlIfChanged(content, targets_path)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml schema file describing each rule's property pages.

  This is the XamlTaskFactory schema consumed by Visual Studio to render the
  rule's property UI and by the task factory at build time.

  Arguments:
    xml_path: the path of the .xml file to write.
    msbuild_rules: the list of MSBuildRule objects to emit.
  """
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    content.extend([
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         # Dropdown for choosing which target the rule runs before.
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         # Dropdown for choosing which target the rule runs after.
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        # Register the rule's item type and map its file extension to it.
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Returns the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return r"'$(Configuration)|$(Platform)'=='%s|%s'" % (configuration, platform)
def _GetMSBuildProjectConfigurations(configurations):
  """Returns the ProjectConfigurations ItemGroup for the project file.

  Arguments:
    configurations: dict of configuration name to configuration dict.
  """
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  # .items() rather than the Python 2-only .iteritems(): identical behavior
  # here (sorted() consumes either) and keeps the file Python 3-compatible.
  for (name, settings) in sorted(configurations.items()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    designation = '%s|%s' % (configuration, platform)
    group.append(
        ['ProjectConfiguration', {'Include': designation},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['TargetName', target_name],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Returns the per-configuration 'Configuration' PropertyGroup(s).

  Derives ConfigurationType and (when present) CharacterSet from the MSVS
  attributes of each configuration and folds them into conditional
  properties.

  Arguments:
    spec: the target project dict.
    build_file: the path of the gyp file defining the target.
  """
  properties = {}
  # .items() rather than the Python 2-only .iteritems(): identical behavior
  # here and keeps the file Python 3-compatible.
  for name, settings in spec['configurations'].items():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    character_set = msbuild_attributes.get('CharacterSet')
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    if character_set:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
def _GetMSBuildAttributes(spec, config, build_file):
  """Converts MSVS project attributes to their MSBuild equivalents.

  Arguments:
    spec: the target project dict.
    config: the configuration dict.
    build_file: the path of the gyp file defining the target.

  Returns:
    A dict mapping MSBuild attribute names to converted values.
  """
  # Use the MSVS attributes and convert them.  In the future, we may want to
  # support Gyp files specifying 'msbuild_configuration_attributes' directly.
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      # MSBuild expects directory properties to end with a backslash.
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = {
          '0': 'MultiByte',
          '1': 'Unicode'
      }[msvs_attributes[a]]
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = {
          '1': 'Application',
          '2': 'DynamicLibrary',
          '4': 'StaticLibrary',
          '10': 'Utility'
      }[msvs_attributes[a]]
    else:
      # Function-call form works identically on Python 2 (a parenthesized
      # expression) and Python 3; the statement form was Python 2-only.
      print('Warning: Do not know how to convert MSVS attribute ' + a)
  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Returns conditional PropertyGroups with per-configuration globals.

  Emits IntDir/OutDir, the cygwin/python ExecutablePath extension and any
  tool-less ('') entries from finalized_msbuild_settings.

  Arguments:
    spec: the target project dict.
    configurations: dict of configuration name to configuration dict.
    build_file: the path of the gyp file defining the target.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
  if new_paths:
    new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
  properties = {}
  # .items() rather than the Python 2-only .iteritems(): identical behavior
  # (sorted() consumes either) and keeps the file Python 3-compatible.
  for (name, configuration) in sorted(configurations.items()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    # The tool named '' holds target-level (tool-less) settings.
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.items()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
for name, values in sorted(properties.iteritems()):
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
# If the value is the same all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns conditional ItemDefinitionGroups with per-tool settings.

  Arguments:
    spec: the target project dict (unused here; kept for interface parity).
    configurations: dict of configuration name to configuration dict.
  """
  groups = []
  # .items() rather than the Python 2-only .iteritems(): identical behavior
  # (sorted() consumes either) and keeps the file Python 3-compatible.
  for (name, configuration) in sorted(configurations.items()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    group = ['ItemDefinitionGroup',
             {'Condition': _GetConfigurationCondition(name, configuration)}
            ]
    for tool_name, tool_settings in sorted(msbuild_settings.items()):
      # Skip the tool named '' which is a holder of global settings handled
      # by _GetMSBuildConfigurationGlobalProperties.
      if tool_name:
        if tool_settings:
          tool = [tool_name]
          for name, value in sorted(tool_settings.items()):
            formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
                                                           value)
            tool.append([name, formatted_value])
          group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Computes and stores the configuration's finalized_msbuild_settings.

  Either validates explicit 'msbuild_settings' or converts 'msvs_settings',
  then folds in include dirs, libraries, output file, defines, disabled
  warnings, precompiled-header and module-definition information.  The
  result is written to configuration['finalized_msbuild_settings'].
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s.  Ignoring setting of %s' % (ignored_setting, str(value)))
  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  # TODO(jeanluc) Validate & warn that we don't translate
  # prebuild = configuration.get('msvs_prebuild')
  # postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')
  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries)
  if out_file:
    # only_if_unset: an explicit OutputFile in the settings wins.
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for serialization into the project file.

  Arguments:
    tool_name: the MSBuild tool the setting belongs to ('' for global).
    name: the setting name.
    value: a string or a list of strings.

  Returns:
    The formatted string value.
  """
  # isinstance instead of type(...) == list: idiomatic and also accepts
  # list subclasses.
  if not isinstance(value, list):
    return MSVSSettings.ConvertVCMacrosToMSBuild(value)
  # For some settings, VS2010 does not automatically extend the inherited
  # value; splice in the corresponding %(...) macro explicitly.
  # TODO(jeanluc) Is this what we want?
  if name in ['AdditionalDependencies',
              'AdditionalIncludeDirectories',
              'AdditionalLibraryDirectories',
              'AdditionalOptions',
              'DelayLoadDLLs',
              'DisableSpecificWarnings',
              'PreprocessorDefinitions']:
    # Concatenate into a new list instead of appending in place: the
    # original mutated the caller's settings list, so formatting the same
    # setting twice would accumulate duplicate macros.
    value = value + ['%%(%s)' % name]
  # For most tools, entries in a list should be separated with ';' but some
  # settings use a space.  Check for those first.
  exceptions = {'ClCompile': ['AdditionalOptions'],
                'Link': ['AdditionalOptions'],
                'Lib': ['AdditionalOptions']}
  if tool_name in exceptions and name in exceptions[tool_name]:
    char = ' '
  else:
    char = ';'
  return char.join(
      [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no otherwise
  visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  """
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      _VerifySourcesExist(source.contents, root_dir)
    else:
      # Paths containing '$' embed build variables and cannot be checked.
      if '$' not in source:
        full_path = os.path.join(root_dir, source)
        if not os.path.exists(full_path):
          # Function-call form works identically on Python 2 (a single
          # parenthesized expression) and Python 3; the statement form
          # was Python 2-only.
          print('Warning: Missing input file ' + full_path +
                ' pwd=' + os.getcwd())
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
                       actions_spec, sources_handled_by_action, list_excluded):
  """Builds the ItemGroup sections listing this project's sources.

  Sources are bucketed by MSBuild group ('none', 'midl', 'include',
  'compile', 'resource', 'rule'); non-empty buckets become ItemGroups, and
  any custom-build actions are appended as a final ItemGroup.
  """
  groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
  grouped_sources = dict((g, []) for g in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               extension_to_rule_name, sources_handled_by_action,
               list_excluded)
  result = [['ItemGroup'] + grouped_sources[g]
            for g in groups if grouped_sources[g]]
  if actions_spec:
    result.append(['ItemGroup'] + actions_spec)
  return result
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 extension_to_rule_name, sources_handled_by_action,
                 list_excluded):
  """Recursively buckets sources (with per-config detail) into their groups.

  Arguments:
    spec: the target project dict.
    sources: the hierarchy of Filter objects and source file names.
    exclusions: dict mapping a source to its excluded (name, config) pairs.
    grouped_sources: dict mapping group name to its entry list; mutated.
    extension_to_rule_name: a dictionary mapping file extensions to rules.
    sources_handled_by_action: set of sources consumed by custom actions,
        which must not be listed again as ordinary sources.
    list_excluded: pass-through flag, not consulted here.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   extension_to_rule_name, sources_handled_by_action,
                   list_excluded)
    else:
      # 'source not in' instead of 'not source in' (idiomatic negation).
      if source not in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded everywhere: emit one unconditional exclusion.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed.  (.items() rather than the Python 2-only
        # .iteritems() keeps the file Python 3-compatible.)
        for config_name, configuration in spec['configurations'].items():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']
            if precompiled_source == source:
              condition = _GetConfigurationCondition(config_name,
                                                     configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])
        group, element = _MapFileToMsBuildSourceType(source,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns the ItemGroup of ProjectReference elements for dependencies.

  Arguments:
    project: the MSVSProject being generated.
  """
  references = []
  if project.dependencies:
    group = ['ItemGroup']
    for dependency in project.dependencies:
      guid = dependency.guid
      project_dir = os.path.split(project.path)[0]
      relative_path = gyp.common.RelativePath(dependency.path, project_dir)
      project_ref = ['ProjectReference',
                     {'Include': relative_path},
                     ['Project', guid],
                     ['ReferenceOutputAssembly', 'false']
                    ]
      # .values() rather than the Python 2-only .itervalues(): identical
      # behavior here and keeps the file Python 3-compatible.
      for config in dependency.spec.get('configurations', {}).values():
        # If it's disabled in any config, turn it off in the reference.
        if config.get('msvs_2010_disable_uldi_when_referenced', 0):
          project_ref.append(['UseLibraryDependencyInputs', 'false'])
          break
      group.append(project_ref)
    references.append(group)
  return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generates the .vcxproj (and companion .filters) file for one project.

  Arguments:
    project: the MSVSProject to generate.
    options: global generator options.
    version: the MSVSVersion in use.
    generator_flags: dict of generator-specific flags.
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  msbuildproj_dir = os.path.dirname(project.path)
  if msbuildproj_dir and not os.path.exists(msbuildproj_dir):
    os.makedirs(msbuildproj_dir)
  # Prepare list of sources and excluded sources.
  # (The unused gyp_path/relative_path_of_gyp_file locals were removed.)
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  _GenerateRulesForMSBuild(project_dir, options, spec,
                           sources, excluded_sources,
                           props_files_of_rules, targets_files_of_rules,
                           actions_to_add, extension_to_rule_name)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded))
  _AddActions(actions_to_add, spec, project.build_file)
  _AddCopies(actions_to_add, spec)
  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)
  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              extension_to_rule_name)
  _VerifySourcesExist(sources, project_dir)
  # .items() rather than the Python 2-only .iteritems(): identical behavior
  # here and keeps the file Python 3-compatible.
  for (_, configuration) in configurations.items():
    _FinalizeMSBuildSettings(spec, configuration)
  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]
  # Section order below mirrors the layout Visual Studio itself produces.
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  content += import_cpp_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, extension_to_rule_name, actions_spec,
      sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)
  # TODO(jeanluc) File a bug to get rid of runas.  We had in MSVS:
  # has_run_as = _WriteMSVSUserFile(project.path, version, spec)
  easy_xml.WriteXmlIfChanged(content, project.path)
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.

  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = set()
  actions_spec = []
  # Use items() (Python 2/3 compatible, unlike iteritems()) and iterate in
  # sorted order so the generated project file is deterministic.
  for primary_input, actions in sorted(actions_to_add.items()):
    inputs = set()
    outputs = set()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(set(action['inputs']))
      outputs.update(set(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Merge all actions attached to this input into one CustomBuild element.
    description = ', and also '.join(descriptions)
    command = ' && '.join(commands)
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Appends a CustomBuild element for one merged action to actions_spec.

  Arguments:
    spec: the target project dict (not used by this helper).
    primary_input: the input file the action is attached to.
    inputs: iterable of all input files of the action.
    outputs: iterable of the files the action produces.
    cmd: the command line to run, in VC-macro form.
    description: the message displayed while the action runs.
    sources_handled_by_action: set updated with the fixed primary input.
    actions_spec: list that the new CustomBuild element is appended to.
  """
  command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
  primary_input = _FixPath(primary_input)
  fixed_inputs = _FixPaths(inputs)
  fixed_outputs = _FixPaths(outputs)
  # The primary input is implied by the Include attribute, so list only the
  # remaining inputs as AdditionalInputs.
  additional_inputs = ';'.join(path for path in fixed_inputs
                               if path != primary_input)
  sources_handled_by_action.add(primary_input)
  action_spec = ['CustomBuild', {'Include': primary_input}]
  # TODO(jeanluc) 'Document' for all or just if as_sources?
  action_spec += [
      ['FileType', 'Document'],
      ['Command', command],
      ['Message', description],
      ['Outputs', ';'.join(fixed_outputs)],
  ]
  if additional_inputs:
    action_spec.append(['AdditionalInputs', additional_inputs])
  actions_spec.append(action_spec)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.