hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aceb94572b27087b91323077512ace535e9524a6 | 1,522 | py | Python | src/flyttdata/__main__.py | pontushojer/flyttdata | 9d22d29b29d7197dbd62d1bb553014d32c6bfcb6 | [
"MIT"
] | null | null | null | src/flyttdata/__main__.py | pontushojer/flyttdata | 9d22d29b29d7197dbd62d1bb553014d32c6bfcb6 | [
"MIT"
] | null | null | null | src/flyttdata/__main__.py | pontushojer/flyttdata | 9d22d29b29d7197dbd62d1bb553014d32c6bfcb6 | [
"MIT"
] | null | null | null | """
Flyttdata
"""
import sys
import logging
import pkgutil
import importlib
from argparse import ArgumentParser
import flyttdata.cli as cli_package
logger = logging.getLogger(__name__)
def main() -> int:
    """Parse the command line, dispatch to the chosen subcommand, and
    return the process exit status (0 on success; argparse exits on error)."""
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(module)s - %(levelname)s: %(message)s",
                        datefmt='%Y-%m-%d %H:%M:%S')
    parser = ArgumentParser(description=__doc__, prog="flyttdata")
    parser.add_argument("--version", action="version", version="%(prog)s 0.1")
    subparsers = parser.add_subparsers()
    # Import each module that implements a subcommand and add a subparser for it.
    # Each subcommand is implemented as a module in the cli subpackage.
    # It needs to implement an add_arguments() and a main() function.
    modules = pkgutil.iter_modules(cli_package.__path__)
    for _, module_name, _ in modules:
        module = importlib.import_module("." + module_name, cli_package.__name__)
        # The subcommand module's docstring doubles as its help text.
        help_message = module.__doc__
        subparser = subparsers.add_parser(
            module_name, help=help_message, description=module.__doc__
        )
        # Stash the module on the parsed args so it can be dispatched below.
        subparser.set_defaults(module=module)
        module.add_arguments(subparser)
    args = parser.parse_args()
    if not hasattr(args, "module"):
        # No subcommand was given; parser.error() prints usage and exits.
        parser.error("Please provide the name of a subcommand to run")
    else:
        module = args.module
        # Remove the helper attribute so the subcommand sees only its own args.
        del args.module
        module.main(args)
    return 0
if __name__ == "__main__":
sys.exit(main())
| 31.061224 | 87 | 0.666229 |
aceb953df1854cb8a44cf4b5e6a26b7fef386e9f | 21,435 | py | Python | bot/trade.py | SparkySparkman/Binance-volatility-trading-bot | 8798e55b4025add45f65f08a44ab2d4f57e51442 | [
"MIT"
] | 17 | 2021-07-15T19:57:33.000Z | 2022-01-19T13:32:44.000Z | bot/trade.py | amin-IT/Binance-volatility-trading-bot | 5648f393abb93ec1b9899205787f1881cbf6e37e | [
"MIT"
] | null | null | null | bot/trade.py | amin-IT/Binance-volatility-trading-bot | 5648f393abb93ec1b9899205787f1881cbf6e37e | [
"MIT"
] | 5 | 2021-07-30T07:09:37.000Z | 2021-11-04T14:14:04.000Z | import os
# use if needed to pass args to external modules
import sys
# used for directory handling
import glob
import time
import threading
#gogo MOD telegram needs import request
import requests
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
# used to store trades and sell assets
import json
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_telegram_creds
)
from bot.settings import *
from bot.grab import *
def trailing_buy(volatile_coins):
    """Track volatile coins across calls and only release them for buying
    once the percent change since the previous price snapshot exceeds
    TRAILING_BUY_THRESHOLD. Returns the dict of coins to buy now."""
    global trail_buy_historical
    global trail_buy_coins
    buy_volatile_coins = {}
    trail_buy_last_price = get_price(False)
    # Merge newly detected volatile coins into the trailing watch list.
    for coin in volatile_coins:
        trail_buy_coins[coin] = volatile_coins[coin]
    for coin in trail_buy_coins:
        if float(trail_buy_historical[coin]['price']) > float(trail_buy_last_price[coin]['price']):
            # Price dropped since the last snapshot: fold the (negative)
            # change percentage into the coin's accumulated score.
            trail_buy_coins[coin] = trail_buy_coins[coin] + (-1.0 *(float(trail_buy_historical[coin]['price']) - float(trail_buy_last_price[coin]['price'])) / float(trail_buy_historical[coin]['price']) * 100)
            print(f"COIN: {coin} has DROPPED from {trail_buy_historical[coin]['price']} to {trail_buy_last_price[coin]['price']}")
            print(f"COIN: {coin} has DROPPED for {-1.0 *(float(trail_buy_historical[coin]['price']) - float(trail_buy_last_price[coin]['price'])) / float(trail_buy_historical[coin]['price']) * 100}%")
        if float(trail_buy_historical[coin]['price']) < float(trail_buy_last_price[coin]['price']):
            print(f"COIN: {coin} has GONE UP!!!! from {trail_buy_historical[coin]['price']} to {trail_buy_last_price[coin]['price']}")
            print(f"COIN: {coin} has GONE UP!!!! for {-1.0 *(float(trail_buy_historical[coin]['price']) - float(trail_buy_last_price[coin]['price'])) / float(trail_buy_historical[coin]['price']) * 100}%")
        # NOTE(review): this expression is positive when the price has RISEN
        # since the last snapshot, so the buy is released on a rebound above
        # TRAILING_BUY_THRESHOLD — confirm this sign convention is intended.
        if float(-1.0 *(float(trail_buy_historical[coin]['price']) - float(trail_buy_last_price[coin]['price'])) / float(trail_buy_historical[coin]['price']) * 100) > settings_struct['TRAILING_BUY_THRESHOLD']:
            buy_volatile_coins[coin] = trail_buy_coins[coin]
    if buy_volatile_coins:
        # Coins released for buying stop being tracked by the trailer.
        for coin in buy_volatile_coins:
            del trail_buy_coins[coin]
    # Current prices become the reference snapshot for the next call.
    trail_buy_historical = trail_buy_last_price
    print(f"TRAIL_BUY_COINS: {trail_buy_coins}")
    print(f"BUY_VOLATILE_COINS: {buy_volatile_coins}")
    return buy_volatile_coins
def trade_calculations(type, priceChange):
    """Update the module-level session/trading statistics.

    NOTE(review): the parameter `type` shadows the builtin; it is a string
    tag — 'holding' updates running stats for an open position, 'sell'
    records a closed trade. `priceChange` is a percentage.
    """
    if type == 'holding':
        # Track the best/worst percentage seen while holding this position.
        if trading_struct['max_holding_price'] < priceChange :
            trading_struct['max_holding_price'] = priceChange
        if trading_struct['min_holding_price'] > priceChange :
            trading_struct['min_holding_price'] = priceChange
        session_struct['unrealised_percent'] = session_struct['unrealised_percent'] + priceChange
    if type == 'sell':
        if priceChange > 0:
            # Winning trade: reset the consecutive-loss streak.
            session_struct['win_trade_count'] = session_struct['win_trade_count'] + 1
            session_struct['last_trade_won'] = True
            trading_struct['consecutive_loss'] = 0
            trading_struct['won_trade_percent'] = priceChange
            trading_struct['sum_won_trades'] = trading_struct['sum_won_trades'] + trading_struct['won_trade_percent']
        else:
            session_struct['loss_trade_count'] = session_struct['loss_trade_count'] + 1
            session_struct['last_trade_won'] = False
        if session_struct['last_trade_won'] == False:
            trading_struct['consecutive_loss'] += 1
            trading_struct['lost_trade_percent'] = priceChange
            trading_struct['sum_lost_trades'] = trading_struct['sum_lost_trades'] + trading_struct['lost_trade_percent']
            # After a loss, pull STOP_LOSS towards the session profit/trade ratio.
            settings_struct['STOP_LOSS'] = (settings_struct['STOP_LOSS'] + session_struct['profit_to_trade_ratio']) / 2
        # Fold the per-position extremes into session totals and reset them.
        trading_struct['sum_max_holding_price'] = trading_struct['sum_max_holding_price'] + trading_struct['max_holding_price']
        trading_struct['max_holding_price'] = 0
        trading_struct['sum_min_holding_price'] = trading_struct['sum_min_holding_price'] + trading_struct['min_holding_price']
        trading_struct['min_holding_price'] = 0
        session_struct['closed_trades_percent'] = session_struct['closed_trades_percent'] + priceChange
        session_struct['reload_tickers_list'] = True
        session_struct['unrealised_percent'] = 0
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume'''
    # added feature to buy only if percent and signal triggers uses PERCENT_SIGNAL_BUY true or false from config
    if PERCENT_SIGNAL_BUY == True:
        volatile_coins, number_of_coins, last_price = wait_for_price('percent_mix_signal')
    else:
        volatile_coins, number_of_coins, last_price = wait_for_price('percent_and_signal')
    buy_volatile_coins = {}
    lot_size = {}
    volume = {}
    # Apply the trailing-buy filter: only coins it releases are sized below.
    buy_volatile_coins = trailing_buy(volatile_coins)
    for coin in buy_volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            step_size = session_struct['symbol_info'][coin]
            lot_size[coin] = step_size.index('1') - 1
        except KeyError:
            # not retrieved at startup, try again
            try:
                coin_info = client.get_symbol_info(coin)
                step_size = coin_info['filters'][2]['stepSize']
                lot_size[coin] = step_size.index('1') - 1
            except:
                # NOTE(review): if both lookups fail, lot_size[coin] is never
                # assigned and the max() below raises KeyError — confirm the
                # symbol info is guaranteed to be available upstream.
                pass
        lot_size[coin] = max(lot_size[coin], 0)
        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
    return volume, last_price
def test_order_id():
    """Return a fake nine-digit order id for simulated (TEST_MODE) orders.

    The id is drawn uniformly at random from [100000000, 999999999]; it is
    NOT derived from the current time (the original docstring — which was
    also misplaced after the import, making it a no-op string — claimed it
    hashed the clock).

    :return: a random 9-digit ``int``.
    """
    # Local import: only needed when simulating orders in TEST_MODE.
    import random
    return random.randint(100000000, 999999999)
def buy():
    '''Place Buy market orders for each volatile coin found'''
    global UNIQUE_BUYS
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        BUYABLE = True
        if UNIQUE_BUYS and (coin in coins_bought):
            BUYABLE = False
        # only buy if the there are no active trades on the coin
        if BUYABLE:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
            REPORT = str(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
            if TEST_MODE:
                # Simulated order: fabricate the minimal order record.
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': test_order_id(),
                    'time': datetime.now().timestamp()
                }]
                # Log trades
                report_struct['report'] = REPORT
                report_struct['log'] = True
                continue
            # try to create a real order if the test orders did not raise an exception
            try:
                order_details = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')
                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)
                else:
                    # Log, announce, and report trade
                    print('Order returned, saving order to file')
                    if not TEST_MODE:
                        orders[coin] = extract_order_data(order_details)
                        REPORT = str(f"BUY: bought {orders[coin]['volume']} {coin} - average price: {orders[coin]['avgPrice']} {PAIR_WITH}")
                        report_struct['report'] = REPORT
                        report_struct['log'] = True
        else:
            print(f'Signal detected, but there is already an active trade on {coin}')
    return orders, last_price, volume
def sell_coins():
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
    global session_struct, settings_struct, trading_struct
    global hsp_head
    global FULL_LOG
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    holding_timeout_sell_trigger = False
    for coin in list(coins_bought):
        BUY_PRICE = float(coins_bought[coin]['bought_at'])
        # coinTakeProfit is the price at which to 'take profit' based on config % markup
        coinTakeProfit = BUY_PRICE + ((BUY_PRICE * coins_bought[coin]['take_profit']) / 100)
        # coinStopLoss is the price at which to 'stop losses' based on config % markdown
        coinStopLoss = BUY_PRICE + ((BUY_PRICE * coins_bought[coin]['stop_loss']) / 100)
        # coinHoldingTimeLimit is the time limit for holding onto a coin
        coinHoldingTimeLimit = float(coins_bought[coin]['timestamp']) + settings_struct['HOLDING_TIME_LIMIT']
        lastPrice = float(last_price[coin]['price'])
        LAST_PRICE = "{:.8f}".format(lastPrice)
        sellFee = (coins_bought[coin]['volume'] * lastPrice) * (TRADING_FEE/100)
        buyPrice = float(coins_bought[coin]['bought_at'])
        BUY_PRICE = "{:.8f}". format(buyPrice)
        buyFee = (coins_bought[coin]['volume'] * buyPrice) * (TRADING_FEE/100)
        # Note: priceChange and priceChangeWithFee are percentages!
        priceChange = float((lastPrice - buyPrice) / buyPrice * 100)
        # Worked example (kept from the original author):
        # priceChange = (0.00006648 - 0.00006733) / 0.00006733 * 100
        # volume = 150
        # buyPrice: 0.00006733
        # lastPrice: 0.00006648
        # buyFee = (150 * 0.00006733) * (0.075/100)
        # buyFee = 0.000007574625
        # sellFee = (150 * 0.00006648) * (0.075/100)
        # sellFee = 0.000007479
        # check that the price is above the take profit and readjust coinStopLoss and coinTakeProfit accordingly if trialing stop loss used
        if lastPrice > coinTakeProfit and USE_TRAILING_STOP_LOSS:
            # increasing coinTakeProfit by TRAILING_TAKE_PROFIT (essentially next time to readjust coinStopLoss)
            coins_bought[coin]['take_profit'] = priceChange + settings_struct['TRAILING_TAKE_PROFIT']
            coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - settings_struct['TRAILING_STOP_LOSS']
            if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.{decimals()}f} and SL {coins_bought[coin]['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
            continue
        # Live trading works in millisecond timestamps, TEST_MODE in seconds.
        if not TEST_MODE:
            current_time = float(round(time.time() * 1000))
            # print(f'TL:{coinHoldingTimeLimit}, time: {current_time} HOLDING_TIME_LIMIT: {HOLDING_TIME_LIMIT}, TimeLeft: {(coinHoldingTimeLimit - current_time)/1000/60} ')
        if TEST_MODE:
            current_time = float(round(time.time()))
            # print(f'TL:{coinHoldingTimeLimit}, time: {current_time} HOLDING_TIME_LIMIT: {HOLDING_TIME_LIMIT}, TimeLeft: {(coinHoldingTimeLimit - current_time)/60} ')
        trade_calculations('holding', priceChange)
        # Force a sell if the position is past its holding-time limit and
        # still above the configured price threshold.
        if coinHoldingTimeLimit < current_time and priceChange > settings_struct['HOLDING_PRICE_THRESHOLD']:
            holding_timeout_sell_trigger = True
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        if session_struct['sell_all_coins'] == True or lastPrice < coinStopLoss or lastPrice > coinTakeProfit and not USE_TRAILING_STOP_LOSS or holding_timeout_sell_trigger == True:
            print(f"{txcolors.SELL_PROFIT if priceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin}. Bought at: {BUY_PRICE} (Price now: {LAST_PRICE}) - {priceChange:.2f}% - Est: {(QUANTITY * priceChange) / 100:.{decimals()}f} {PAIR_WITH}{txcolors.DEFAULT}")
            # try to create a real order
            try:
                if not TEST_MODE:
                    order_details = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                if not TEST_MODE:
                    coins_sold[coin] = extract_order_data(order_details)
                    # Use the real average fill price for the profit report.
                    lastPrice = coins_sold[coin]['avgPrice']
                    sellFee = coins_sold[coin]['tradeFee']
                    coins_sold[coin]['orderid'] = coins_bought[coin]['orderid']
                    priceChange = float((lastPrice - buyPrice) / buyPrice * 100)
                else:
                    coins_sold[coin] = coins_bought[coin]
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()
                # Log trade
                profit = lastPrice - buyPrice
                trade_calculations('sell', priceChange)
                #gogo MOD to trigger trade lost or won and to count lost or won trades
                if session_struct['sell_all_coins'] == True: REPORT = f"PAUSE_SELL - SELL: {coins_sold[coin]['volume']} {coin} - Bought at {buyPrice:.{decimals()}f}, sold at {lastPrice:.{decimals()}f} - Profit: {profit:.{decimals()}f} {PAIR_WITH} ({priceChange:.2f}%)"
                if lastPrice < coinStopLoss: REPORT = f"STOP_LOSS - SELL: {coins_sold[coin]['volume']} {coin} - Bought at {buyPrice:.{decimals()}f}, sold at {lastPrice:.{decimals()}f} - Profit: {profit:.{decimals()}f} {PAIR_WITH} ({priceChange:.2f}%)"
                if lastPrice > coinTakeProfit: REPORT = f"TAKE_PROFIT - SELL: {coins_sold[coin]['volume']} {coin} - Bought at {buyPrice:.{decimals()}f}, sold at {lastPrice:.{decimals()}f} - Profit: {profit:.{decimals()}f} {PAIR_WITH} ({priceChange:.2f}%)"
                if holding_timeout_sell_trigger: REPORT = f"HOLDING_TIMEOUT - SELL: {coins_sold[coin]['volume']} {coin} - Bought at {buyPrice:.{decimals()}f}, sold at {lastPrice:.{decimals()}f} - Profit: {profit:.{decimals()}f} {PAIR_WITH} ({priceChange:.2f}%)"
                session_struct['session_profit'] = session_struct['session_profit'] + profit
                holding_timeout_sell_trigger = False
                report_struct['report'] = REPORT
                report_struct['message'] = True
                report_struct['log'] = True
            continue
        if len(coins_bought) > 0:
            print(f"TP:{coinTakeProfit:.{decimals()}f}:{coins_bought[coin]['take_profit']:.2f} or SL:{coinStopLoss:.{decimals()}f}:{coins_bought[coin]['stop_loss']:.2f} not yet reached, not selling {coin} for now >> Bought at: {BUY_PRICE} - Now: {LAST_PRICE} : {txcolors.SELL_PROFIT if priceChange >= 0. else txcolors.SELL_LOSS}{priceChange:.2f}% Est: {(QUANTITY*(priceChange-(buyFee+sellFee)))/100:.{decimals()}f} {PAIR_WITH} - CIP: {settings_struct['CHANGE_IN_PRICE_MIN']:.2f}/{settings_struct['CHANGE_IN_PRICE_MAX']:.2f} - TAKE_PROFIT: {settings_struct['TAKE_PROFIT']:.2f}{txcolors.DEFAULT}")
    return coins_sold
def extract_order_data(order_details):
    """Aggregate the per-fill data of a Binance market order.

    Market orders are not always filled at a single price, so this walks the
    ``fills`` list of *order_details* and computes the volume-weighted
    average price plus quantity and fee totals.

    :param order_details: order dict as returned by ``client.create_order``
        (must contain ``symbol``, ``orderId``, ``transactTime`` and a
        ``fills`` list with ``price``/``qty``/``commission`` entries).
    :return: dict with ``symbol``, ``orderId``, ``timestamp``, ``avgPrice``,
        ``volume``, ``tradeFeeBNB`` (sum of reported commissions) and
        ``tradeFee`` (approximate fee in quote currency from TRADING_FEE %).
    """
    # Only TRADING_FEE is read here; the original also declared STOP_LOSS
    # and TAKE_PROFIT global but never used them.
    global TRADING_FEE

    transactionInfo = {}
    # Running totals over all fills of this order.
    FILLS_TOTAL = 0   # sum of price * qty
    FILLS_QTY = 0     # total filled quantity
    FILLS_FEE = 0     # total commission as reported by Binance
    BNB_WARNING = 0   # ensures the fee warning is printed at most once

    for fills in order_details['fills']:
        FILL_PRICE = float(fills['price'])
        FILL_QTY = float(fills['qty'])
        FILLS_FEE += float(fills['commission'])

        # Warn once if the commission was not charged in BNB.
        # NOTE(review): the 0.75 comparison looks like it should match the
        # configured discounted fee rate — confirm against bot settings.
        if (fills['commissionAsset'] != 'BNB') and (TRADING_FEE == 0.75) and (BNB_WARNING == 0):
            print("WARNING: BNB not used for trading fee, please enable paying fees in BNB on Binance to get the discounted rate")
            BNB_WARNING += 1

        # quantity of fills * price
        FILLS_TOTAL += (FILL_PRICE * FILL_QTY)
        # add to running total of fills quantity
        FILLS_QTY += FILL_QTY

    # Volume-weighted average fill price.
    FILL_AVG = (FILLS_TOTAL / FILLS_QTY)
    tradeFeeApprox = (float(FILLS_QTY) * float(FILL_AVG)) * (TRADING_FEE / 100)

    # Create object with received data from Binance.
    transactionInfo = {
        'symbol': order_details['symbol'],
        'orderId': order_details['orderId'],
        'timestamp': order_details['transactTime'],
        'avgPrice': float(FILL_AVG),
        'volume': float(FILLS_QTY),
        'tradeFeeBNB': float(FILLS_FEE),
        'tradeFee': tradeFeeApprox,
    }
    return transactionInfo
def update_portfolio(orders, last_price, volume):
    global session_struct
    '''add every coin bought to our portfolio for tracking/selling later'''
    if DEBUG: print(orders)
    for coin in orders:
        if not TEST_MODE:
            # Real order: orders[coin] is the dict from extract_order_data().
            coins_bought[coin] = {
                'symbol': orders[coin]['symbol'],
                'orderid': orders[coin]['orderId'],
                'timestamp': orders[coin]['timestamp'],
                'bought_at': orders[coin]['avgPrice'],
                'volume': orders[coin]['volume'],
                'buyFeeBNB': orders[coin]['tradeFeeBNB'],
                'buyFee': orders[coin]['tradeFee'],
                'stop_loss': -settings_struct['STOP_LOSS'],
                'take_profit': settings_struct['TAKE_PROFIT'],
            }
        else:
            # Simulated order: orders[coin] is a single-element list.
            coins_bought[coin] = {
                'symbol': orders[coin][0]['symbol'],
                'orderid': orders[coin][0]['orderId'],
                'timestamp': orders[coin][0]['time'],
                'bought_at': last_price[coin]['price'],
                'volume': volume[coin],
                'stop_loss': -settings_struct['STOP_LOSS'],
                'take_profit': settings_struct['TAKE_PROFIT'],
            }
        # save the coins in a json file in the same directory
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
        if TEST_MODE: print(f'Order for {orders[coin][0]["symbol"]} with ID {orders[coin][0]["orderId"]} placed and saved to file.')
        if not TEST_MODE: print(f'Order for {orders[coin]["symbol"]} with ID {orders[coin]["orderId"]} placed and saved to file.')
    session_struct['trade_slots'] = len(coins_bought)
def remove_from_portfolio(coins_sold):
    global session_struct
    '''Remove coins sold due to SL or TP from portfolio'''
    for coin,data in coins_sold.items():
        symbol = coin
        order_id = data['orderid']
        # code below created by getsec <3
        # Find the matching open position by order id; the break right after
        # pop() keeps the mutation safe while iterating coins_bought.
        for bought_coin, bought_coin_data in coins_bought.items():
            if bought_coin_data['orderid'] == order_id:
                print(f"Sold {bought_coin}, removed order ID {order_id} from history.")
                coins_bought.pop(bought_coin)
                # Persist the updated portfolio immediately.
                with open(coins_bought_file_path, 'w') as file:
                    json.dump(coins_bought, file, indent=4)
                break
    session_struct['trade_slots'] = len(coins_bought)
    session_struct['reload_tickers_list'] = True
def trade_crypto():
    """Run one full trade cycle: buy signals, record positions, sell
    positions that hit their thresholds, and prune the portfolio.
    Network hiccups from Binance are counted and swallowed so the caller
    can simply loop again."""
    global CONNECTION_ERROR_COUNT, READ_TIMEOUT_COUNT
    try:
        new_orders, latest_prices, buy_volumes = buy()
        update_portfolio(new_orders, latest_prices, buy_volumes)
        remove_from_portfolio(sell_coins())
    except ReadTimeout:
        READ_TIMEOUT_COUNT += 1
        print(f'We got a timeout error from from binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}')
    except ConnectionError as ce:
        CONNECTION_ERROR_COUNT += 1
        print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}')
| 44.56341 | 594 | 0.633963 |
aceb95517690e72b913c1df0df7c92b77e5a3318 | 3,043 | py | Python | env/lib/python3.7/site-packages/docusign_rooms/models/field_data.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/docusign_rooms/models/field_data.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/docusign_rooms/models/field_data.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
DocuSign Rooms API - v2
An API for an integrator to access the features of DocuSign Rooms # noqa: E501
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FieldData(object):
    """Swagger model wrapping a free-form ``data`` mapping.

    NOTE: This class was auto generated by the swagger code generator
    program. It has been modernised to drop the third-party ``six``
    compatibility shim: on Python 3, ``dict.items`` is equivalent to
    ``six.iteritems`` for this use.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    swagger_types = {
        'data': 'dict(str, object)'
    }

    attribute_map = {
        'data': 'data'
    }

    def __init__(self, data=None):  # noqa: E501
        """FieldData - a model defined in Swagger.

        :param data: optional initial value for the ``data`` mapping.
        """
        self._data = None
        self.discriminator = None
        if data is not None:
            self.data = data

    @property
    def data(self):
        """Gets the data of this FieldData.  # noqa: E501

        :return: The data of this FieldData.  # noqa: E501
        :rtype: dict(str, object)
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this FieldData.

        :param data: The data of this FieldData.  # noqa: E501
        :type: dict(str, object)
        """
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}

        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise model items inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FieldData, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, FieldData):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 26.232759 | 83 | 0.5442 |
aceb9733980b2d2f232c11631ffc92dfdd1da29c | 1,618 | py | Python | tests/unit/admin/views/test_squats.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 2 | 2019-08-09T02:49:18.000Z | 2019-08-09T02:53:26.000Z | tests/unit/admin/views/test_squats.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 2 | 2021-03-31T20:06:37.000Z | 2021-12-13T20:51:44.000Z | tests/unit/admin/views/test_squats.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 1 | 2020-07-31T17:19:53.000Z | 2020-07-31T17:19:53.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warehouse.admin.squats import Squat
from warehouse.admin.views import squats as views
from ....common.db.packaging import ProjectFactory
class TestGetSquats:
    def test_get_squats(self, db_request):
        """get_squats should list only squats that have not been reviewed."""
        project_a = ProjectFactory()
        project_b = ProjectFactory()
        project_c = ProjectFactory()
        squat = Squat(squattee=project_a, squatter=project_b)
        # A reviewed squat must be excluded from the listing.
        reviewed_squat = Squat(squattee=project_a, squatter=project_c, reviewed=True)
        db_request.db.add(squat)
        db_request.db.add(reviewed_squat)

        assert views.get_squats(db_request) == {"squats": [squat]}
class TestReviewSquat:
    def test_review_squat(self, db_request):
        """review_squat should mark the posted squat as reviewed."""
        squat = Squat(squattee=ProjectFactory(), squatter=ProjectFactory())
        db_request.db.add(squat)
        db_request.db.flush()
        # Stub out the request surface the view touches.
        db_request.POST = {"id": squat.id}
        db_request.route_path = lambda *a: "/the/redirect"
        db_request.flash = lambda *a: None

        views.review_squat(db_request)

        db_request.db.flush()
        assert squat.reviewed is True
| 33.708333 | 85 | 0.713226 |
aceb995f320cdb9020be61ab13b2fad0fc6e68fb | 2,832 | py | Python | stellargraph/layer/preprocessing_layer.py | xugangwu95/stellargraph | f26f7733235573f5def19da3329fc1d92fdd71ee | [
"Apache-2.0"
] | 1 | 2021-09-24T09:38:01.000Z | 2021-09-24T09:38:01.000Z | stellargraph/layer/preprocessing_layer.py | kjun9/stellargraph | 512e60a8f572a4bb432b0397a2b452251e167d8f | [
"Apache-2.0"
] | null | null | null | stellargraph/layer/preprocessing_layer.py | kjun9/stellargraph | 512e60a8f572a4bb432b0397a2b452251e167d8f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing as a layer in GCN. This is to ensure that the GCN model is differentiable in an end-to-end manner.
"""
from tensorflow.keras import backend as K
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow import keras
import numpy as np
class GraphPreProcessingLayer(Layer):
    """
    This class implements the pre-processing of adjacency matrices in GCN. We implement it in tensorflow so that
    while computing the saliency maps, we are able to calculate the gradients in an end-to-end way.
    We currently only support this for tensorflow backend.

    Args:
        num_of_nodes (int): The number of nodes in the graph.
    """

    def __init__(self, num_of_nodes, **kwargs):
        if K.backend() != "tensorflow":
            raise TypeError("Only tensorflow backend is currently supported.")
        # The layer emits a dense (num_of_nodes, num_of_nodes) matrix.
        self.output_dims = (num_of_nodes, num_of_nodes)
        super().__init__(**kwargs)

    def build(self, input_shape):
        # No trainable weights; just let Keras do its bookkeeping.
        super().build(input_shape)

    def call(self, adj):
        """
        The adjacency matrix pre-processing in tensorflow.
        This function applies the matrix transformations on the adjacency matrix, which are required by GCN.
        GCN requires that the input adjacency matrix should be symmetric, with self-loops, and normalized.

        Args:
            adj (Numpy array): the adjacency matrix to transform.

        Returns:
            The tensor of the transformed adjacency matrix.
        """
        # Build a symmetric adjacency matrix by taking, element-wise, the
        # larger of A[i, j] and A[j, i]:
        #   A = A + A^T * (A^T > A) - A * (A^T > A)
        adj_T = tf.transpose(adj)
        # 1.0 where the transposed entry is larger, 0.0 elsewhere; computed
        # once and reused (the original evaluated the same tf.where twice).
        mask = tf.where(adj_T > adj, tf.ones_like(adj), tf.zeros_like(adj))
        adj = adj + tf.multiply(adj_T, mask) - tf.multiply(adj, mask)

        # Add self loops (force the diagonal to 1).
        # NOTE: tf.diag / tf.diag_part / tf.rsqrt are TF1 aliases that were
        # removed in TF2; the tf.linalg / tf.math equivalents are used
        # consistently (the method already used tf.linalg.diag here).
        adj = adj + tf.linalg.diag(tf.ones(adj.shape[0]) - tf.linalg.diag_part(adj))

        # Symmetric normalization: D^{-1/2} A D^{-1/2}.
        rowsum = tf.reduce_sum(adj, 1)
        d_mat_inv_sqrt = tf.linalg.diag(tf.math.rsqrt(rowsum))
        adj_normalized = tf.matmul(tf.matmul(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
        return adj_normalized
| 35.848101 | 112 | 0.661017 |
aceb99756b1105812af7225d711696748d56c7b2 | 3,250 | py | Python | openstack_dashboard/dashboards/admin/vg_snapshots/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/dashboards/admin/vg_snapshots/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 26 | 2015-02-23T16:37:31.000Z | 2020-07-02T08:37:41.000Z | openstack_dashboard/dashboards/admin/vg_snapshots/views.py | stackhpc/horizon | 0899f67657e0be62dd9e6be327c63bccb4607dc6 | [
"Apache-2.0"
] | 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.vg_snapshots \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.vg_snapshots \
import tabs as admin_tabs
from openstack_dashboard.dashboards.project.vg_snapshots \
import views as project_views
INDEX_URL = "horizon:admin:vg_snapshots:index"
class IndexView(tables.DataTableView):
    """Admin listing of volume group snapshots across all tenants."""
    table_class = admin_tables.GroupSnapshotsTable
    page_title = _("Group Snapshots")

    def get_data(self):
        """Return all group snapshots, annotated with their group object
        and owning tenant name; failures degrade to empty lists with a
        user-visible error via exceptions.handle()."""
        try:
            vg_snapshots = api.cinder.group_snapshot_list(
                self.request, {'all_tenants': 1})
        except Exception:
            vg_snapshots = []
            exceptions.handle(self.request, _("Unable to retrieve "
                                              "volume group snapshots."))
        try:
            # Map group id -> group object for annotation below.
            groups = dict((g.id, g) for g
                          in api.cinder.group_list(self.request,
                                                   {'all_tenants': 1}))
        except Exception:
            groups = {}
            exceptions.handle(self.request,
                              _("Unable to retrieve volume groups."))
        # Gather our tenants to correlate against Group IDs
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _('Unable to retrieve group snapshot project information.')
            exceptions.handle(self.request, msg)
        tenant_dict = dict((t.id, t) for t in tenants)
        for vg_snapshot in vg_snapshots:
            vg_snapshot.group = groups.get(vg_snapshot.group_id)
            tenant_id = getattr(vg_snapshot, "project_id", None)
            tenant = tenant_dict.get(tenant_id)
            # NOTE: If horizon is using cinder API microversion below '3.58',
            # it doesn't include any 'project id' information in group
            # snapshot's object.
            vg_snapshot.tenant_name = getattr(tenant, "name", None)
        return vg_snapshots
class DetailView(project_views.DetailView):
    """Admin detail page for a single group snapshot; reuses the project
    view but swaps in the admin tab group and row actions."""

    tab_group_class = admin_tabs.DetailTabs

    def get_context_data(self, **kwargs):
        """Extend the project context with the admin table's row actions."""
        context = super().get_context_data(**kwargs)
        snapshot_table = admin_tables.GroupSnapshotsTable(self.request)
        row_actions = snapshot_table.render_row_actions(context["vg_snapshot"])
        context["actions"] = row_actions
        return context

    @staticmethod
    def get_redirect_url():
        """Redirect target after actions: the admin snapshot index."""
        return reverse(INDEX_URL)
| 37.790698 | 78 | 0.652923 |
aceb9a190ae8c01ff73b3caa68523142412c84fc | 1,217 | py | Python | vtam/tests/test_cmd_sortreads.py | RaphaelHebert/vtam | 6cbc7e241f9aa4245f5fd000769b9765333d41c2 | [
"MIT"
] | 1 | 2020-11-26T11:25:02.000Z | 2020-11-26T11:25:02.000Z | vtam/tests/test_cmd_sortreads.py | RaphaelHebert/vtam | 6cbc7e241f9aa4245f5fd000769b9765333d41c2 | [
"MIT"
] | 25 | 2020-11-13T13:45:45.000Z | 2022-03-09T08:38:56.000Z | vtam/tests/test_cmd_sortreads.py | RaphaelHebert/vtam | 6cbc7e241f9aa4245f5fd000769b9765333d41c2 | [
"MIT"
] | 4 | 2021-03-25T18:18:52.000Z | 2022-01-08T10:37:39.000Z | from unittest import TestCase
from vtam import CommandSortReads
from vtam.utils.PathManager import PathManager
import filecmp
import os
import shutil
class TestCommandSortReads(TestCase):
    """Integration test for ``CommandSortReads``: runs the sort step on the
    bundled merged FASTA files and compares the output directory against a
    reference copy shipped with the test data."""

    @classmethod
    def setUpClass(cls):
        # Shared locations: test data root and a scratch output directory.
        cls.test_path = PathManager.get_test_path()
        cls.outdir_path = os.path.join(cls.test_path, 'outdir')

    def setUp(self):
        # Inputs (merged FASTA info/dir) and expected reference output.
        self.fastainfo = os.path.join(self.test_path, "test_files", "mergedinfo.tsv")
        self.fastadir = os.path.join(self.test_path, "test_files", "merged")
        self.sorted_dir = os.path.join(self.outdir_path, "sorted")
        self.sorted_dir_bak = os.path.join(self.test_path, "test_files", "sorted")

    def test_01(self):
        CommandSortReads.main(fastainfo=self.fastainfo, fastadir=self.fastadir,
                              sorteddir=self.sorted_dir)
        expected_files = [
            'sortedinfo.tsv',
            'MFZR_14Ben01_Tpos1_1_fw_48_000.fasta',
            'MFZR_14Ben01_Tpos1_1_fw_48_001.fasta',
            'MFZR_14Ben01_Tpos1_1_fw_48_002.fasta',
            'MFZR_14Ben01_Tpos1_1_fw_48_003.fasta',
        ]
        # BUG FIX: filecmp.cmpfiles() returns a (match, mismatch, errors)
        # 3-tuple of lists, so the old assertTrue() on that tuple was always
        # truthy and the test could never fail.  Assert on the individual
        # lists instead, and compare actual file contents (shallow=False)
        # rather than just os.stat() signatures.
        match, mismatch, errors = filecmp.cmpfiles(
            self.sorted_dir, self.sorted_dir_bak, common=expected_files,
            shallow=False)
        self.assertEqual(sorted(match), sorted(expected_files))
        self.assertFalse(mismatch)
        self.assertFalse(errors)

    def tearDown(self):
        # Remove the scratch output so each test starts clean.
        shutil.rmtree(self.outdir_path, ignore_errors=True)
| 36.878788 | 109 | 0.723911 |
aceb9ac73cc7047eb123c37fae2c9d39218e3a6c | 27,788 | py | Python | aiida/orm/implementation/django/node.py | iriberri/aiida_core | c4a1ec5dac92ee62c59d39ca580bde449f3abf73 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/implementation/django/node.py | iriberri/aiida_core | c4a1ec5dac92ee62c59d39ca580bde449f3abf73 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/implementation/django/node.py | iriberri/aiida_core | c4a1ec5dac92ee62c59d39ca580bde449f3abf73 | [
"BSD-2-Clause"
] | 1 | 2018-12-21T11:10:09.000Z | 2018-12-21T11:10:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
import copy
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.db.models import F
from aiida.backends.djsite.db.models import DbLink
from aiida.common.exceptions import (InternalError, ModificationNotAllowed,
NotExistent, UniquenessError)
from aiida.common.folders import RepositoryFolder
from aiida.common.links import LinkType
from aiida.common.utils import get_new_uuid, type_check
from aiida.orm.implementation.general.node import AbstractNode, _NO_DEFAULT, _HASH_EXTRA_KEY
from aiida.orm.implementation.django.computer import Computer
from aiida.orm.mixins import Sealable
from . import user as users
class Node(AbstractNode):
@classmethod
def get_subclass_from_uuid(cls, uuid):
from aiida.backends.djsite.db.models import DbNode
try:
node = DbNode.objects.get(uuid=uuid).get_aiida_class()
except ObjectDoesNotExist:
raise NotExistent("No entry with UUID={} found".format(uuid))
if not isinstance(node, cls):
raise NotExistent("UUID={} is not an instance of {}".format(
uuid, cls.__name__))
return node
@classmethod
def get_subclass_from_pk(cls, pk):
from aiida.backends.djsite.db.models import DbNode
try:
node = DbNode.objects.get(pk=pk).get_aiida_class()
except ObjectDoesNotExist:
raise NotExistent("No entry with pk= {} found".format(pk))
if not isinstance(node, cls):
raise NotExistent("pk= {} is not an instance of {}".format(
pk, cls.__name__))
return node
@classmethod
def query(cls, *args, **kwargs):
from aiida.backends.djsite.db.models import DbNode
if cls._plugin_type_string:
if not cls._plugin_type_string.endswith('.'):
raise InternalError("The plugin type string does not "
"finish with a dot??")
# If it is 'calculation.Calculation.', we want to filter
# for things that start with 'calculation.' and so on
plug_type = cls._plugin_type_string
# Remove the implementation.django or sqla part.
if plug_type.startswith('implementation.'):
plug_type = '.'.join(plug_type.split('.')[2:])
pre, sep, _ = plug_type[:-1].rpartition('.')
superclass_string = "".join([pre, sep])
return DbNode.aiidaobjects.filter(
*args, type__startswith=superclass_string, **kwargs)
else:
# Base Node class, with empty string
return DbNode.aiidaobjects.filter(*args, **kwargs)
def __init__(self, **kwargs):
from aiida.backends.djsite.db.models import DbNode
super(Node, self).__init__()
self._temp_folder = None
dbnode = kwargs.pop('dbnode', None)
# Set the internal parameters
# Can be redefined in the subclasses
self._init_internal_params()
if dbnode is not None:
if not isinstance(dbnode, DbNode):
raise TypeError("dbnode is not a DbNode instance")
if dbnode.pk is None:
raise ValueError("If cannot load an aiida.orm.Node instance "
"from an unsaved Django DbNode object.")
if kwargs:
raise ValueError("If you pass a dbnode, you cannot pass any "
"further parameter")
# If I am loading, I cannot modify it
self._to_be_stored = False
self._dbnode = dbnode
# If this is changed, fix also the importer
self._repo_folder = RepositoryFolder(section=self._section_name,
uuid=self.uuid)
# NO VALIDATION ON __init__ BY DEFAULT, IT IS TOO SLOW SINCE IT OFTEN
# REQUIRES MULTIPLE DB HITS
# try:
# # Note: the validation often requires to load at least one
# # attribute, and therefore it will take a lot of time
# # because it has to cache every attribute.
# self._validate()
# except ValidationError as e:
# raise DbContentError("The data in the DB with UUID={} is not "
# "valid for class {}: {}".format(
# uuid, self.__class__.__name__, e.message))
else:
# TODO: allow to get the user from the parameters
user = self._backend.users.get_automatic_user()
self._dbnode = DbNode(user=user.dbuser,
uuid=get_new_uuid(),
type=self._plugin_type_string)
self._to_be_stored = True
# As creating the temp folder may require some time on slow
# filesystems, we defer its creation
self._temp_folder = None
# Used only before the first save
self._attrs_cache = {}
# If this is changed, fix also the importer
self._repo_folder = RepositoryFolder(section=self._section_name,
uuid=self.uuid)
# Automatically set all *other* attributes, if possible, otherwise
# stop
self._set_with_defaults(**kwargs)
    @classmethod
    def query(cls, *args, **kwargs):
        # NOTE(review): this is an exact duplicate of the ``query``
        # classmethod defined earlier in this class; being later in the
        # class body, this definition silently shadows the first one.
        # The duplicate should be removed in a follow-up change (kept here
        # because this is a documentation-only pass).
        from aiida.backends.djsite.db.models import DbNode
        if cls._plugin_type_string:
            if not cls._plugin_type_string.endswith('.'):
                raise InternalError("The plugin type string does not "
                                    "finish with a dot??")
            # If it is 'calculation.Calculation.', we want to filter
            # for things that start with 'calculation.' and so on
            plug_type = cls._plugin_type_string
            # Remove the implementation.django or sqla part.
            if plug_type.startswith('implementation.'):
                plug_type = '.'.join(plug_type.split('.')[2:])
            pre, sep, _ = plug_type[:-1].rpartition('.')
            superclass_string = "".join([pre, sep])
            return DbNode.aiidaobjects.filter(
                *args, type__startswith=superclass_string, **kwargs)
        else:
            # Base Node class, with empty string
            return DbNode.aiidaobjects.filter(*args, **kwargs)
@property
def type(self):
return self._dbnode.type
@property
def nodeversion(self):
return self._dbnode.nodeversion
@property
def ctime(self):
return self._dbnode.ctime
@property
def mtime(self):
return self._dbnode.mtime
def _get_db_label_field(self):
return self._dbnode.label
def _update_db_label_field(self, field_value):
self._dbnode.label = field_value
if self.is_stored:
with transaction.atomic():
self._dbnode.save()
self._increment_version_number_db()
def _get_db_description_field(self):
return self._dbnode.description
def _update_db_description_field(self, field_value):
self._dbnode.description = field_value
if self.is_stored:
with transaction.atomic():
self._dbnode.save()
self._increment_version_number_db()
def _replace_dblink_from(self, src, label, link_type):
try:
self._add_dblink_from(src, label, link_type)
except UniquenessError:
# I have to replace the link; I do it within a transaction
with transaction.atomic():
self._remove_dblink_from(label)
self._add_dblink_from(src, label, link_type)
def _remove_dblink_from(self, label):
DbLink.objects.filter(output=self._dbnode, label=label).delete()
def _add_dblink_from(self, src, label=None, link_type=LinkType.UNSPECIFIED):
from aiida.orm.querybuilder import QueryBuilder
if not isinstance(src, Node):
raise ValueError("src must be a Node instance")
if self.uuid == src.uuid:
raise ValueError("Cannot link to itself")
if not self.is_stored:
raise ModificationNotAllowed(
"Cannot call the internal _add_dblink_from if the "
"destination node is not stored")
if src._to_be_stored:
raise ModificationNotAllowed(
"Cannot call the internal _add_dblink_from if the "
"source node is not stored")
if link_type is LinkType.CREATE or link_type is LinkType.INPUT:
# Check for cycles. This works if the transitive closure is enabled; if it
# isn't, this test will never fail, but then having a circular link is not
# meaningful but does not pose a huge threat
#
# I am linking src->self; a loop would be created if a DbPath exists already
# in the TC table from self to src
if QueryBuilder().append(
Node, filters={'id': self.pk}, tag='parent').append(
Node, filters={'id': src.pk}, tag='child', descendant_of='parent').count() > 0:
raise ValueError(
"The link you are attempting to create would generate a loop")
if label is None:
autolabel_idx = 1
existing_from_autolabels = list(DbLink.objects.filter(
output=self._dbnode,
label__startswith="link_").values_list('label', flat=True))
while "link_{}".format(autolabel_idx) in existing_from_autolabels:
autolabel_idx += 1
safety_counter = 0
while True:
safety_counter += 1
if safety_counter > 100:
# Well, if you have more than 100 concurrent addings
# to the same node, you are clearly doing something wrong...
raise InternalError("Hey! We found more than 100 concurrent"
" adds of links "
"to the same nodes! Are you really doing that??")
try:
self._do_create_link(src, "link_{}".format(autolabel_idx), link_type)
break
except UniquenessError:
# Retry loop until you find a new loop
autolabel_idx += 1
else:
self._do_create_link(src, label, link_type)
def _do_create_link(self, src, label, link_type):
sid = None
try:
# transactions are needed here for Postgresql:
# https://docs.djangoproject.com/en/1.5/topics/db/transactions/#handling-exceptions-within-postgresql-transactions
sid = transaction.savepoint()
DbLink.objects.create(input=src._dbnode, output=self._dbnode,
label=label, type=link_type.value)
transaction.savepoint_commit(sid)
except IntegrityError as e:
transaction.savepoint_rollback(sid)
raise UniquenessError("There is already a link with the same "
"name (raw message was {})"
"".format(e.message))
def _get_db_input_links(self, link_type):
from aiida.backends.djsite.db.models import DbLink
link_filter = {'output': self._dbnode}
if link_type is not None:
link_filter['type'] = link_type.value
return [(i.label, i.input.get_aiida_class()) for i in
DbLink.objects.filter(**link_filter).distinct()]
def _get_db_output_links(self, link_type):
from aiida.backends.djsite.db.models import DbLink
link_filter = {'input': self._dbnode}
if link_type is not None:
link_filter['type'] = link_type.value
return ((i.label, i.output.get_aiida_class()) for i in
DbLink.objects.filter(**link_filter).distinct())
def get_computer(self):
"""
Get the computer associated to the node.
:return: the Computer object or None.
"""
if self._dbnode.dbcomputer is None:
return None
else:
return Computer(dbcomputer=self._dbnode.dbcomputer)
def _set_db_computer(self, computer):
from aiida.backends.djsite.db.models import DbComputer
self._dbnode.dbcomputer = DbComputer.get_dbcomputer(computer)
def _set_db_attr(self, key, value):
"""
Set the value directly in the DB, without checking if it is stored, or
using the cache.
DO NOT USE DIRECTLY.
:param str key: key name
:param value: its value
"""
from aiida.backends.djsite.db.models import DbAttribute
DbAttribute.set_value_for_node(self._dbnode, key, value)
self._increment_version_number_db()
def _del_db_attr(self, key):
from aiida.backends.djsite.db.models import DbAttribute
if not DbAttribute.has_key(self._dbnode, key):
raise AttributeError("DbAttribute {} does not exist".format(
key))
DbAttribute.del_value_for_node(self._dbnode, key)
self._increment_version_number_db()
def _get_db_attr(self, key):
from aiida.backends.djsite.db.models import DbAttribute
return DbAttribute.get_value_for_node(
dbnode=self._dbnode, key=key)
def _set_db_extra(self, key, value, exclusive=False):
from aiida.backends.djsite.db.models import DbExtra
DbExtra.set_value_for_node(self._dbnode, key, value,
stop_if_existing=exclusive)
self._increment_version_number_db()
def _reset_db_extras(self, new_extras):
raise NotImplementedError("Reset of extras has not been implemented"
"for Django backend.")
def _get_db_extra(self, key):
from aiida.backends.djsite.db.models import DbExtra
return DbExtra.get_value_for_node(dbnode=self._dbnode, key=key)
def _del_db_extra(self, key):
from aiida.backends.djsite.db.models import DbExtra
if not DbExtra.has_key(self._dbnode, key):
raise AttributeError("DbExtra {} does not exist".format(
key))
return DbExtra.del_value_for_node(self._dbnode, key)
self._increment_version_number_db()
def _db_iterextras(self):
from aiida.backends.djsite.db.models import DbExtra
extraslist = DbExtra.list_all_node_elements(self._dbnode)
for e in extraslist:
yield (e.key, e.getvalue())
def _db_iterattrs(self):
from aiida.backends.djsite.db.models import DbAttribute
all_attrs = DbAttribute.get_all_values_for_node(self._dbnode)
for attr in all_attrs:
yield (attr, all_attrs[attr])
def _db_attrs(self):
# Note: I "duplicate" the code from iterattrs and reimplement it
# here, rather than
# calling iterattrs from here, because iterattrs is slow on each call
# since it has to call .getvalue(). To improve!
from aiida.backends.djsite.db.models import DbAttribute
attrlist = DbAttribute.list_all_node_elements(self._dbnode)
for attr in attrlist:
yield attr.key
def add_comment(self, content, user=None):
from aiida.backends.djsite.db.models import DbComment
if not self.is_stored:
raise ModificationNotAllowed("Comments can be added only after "
"storing the node")
if user is None:
user = self.backend.users.get_automatic_user()
return DbComment.objects.create(dbnode=self._dbnode,
user=user.dbuser,
content=content).id
    def get_comment_obj(self, comment_id=None, user=None):
        """Return the Comment wrappers attached to this node, optionally
        filtered by comment primary key and/or authoring user.

        :param comment_id: if not None, restrict to the comment with this pk
        :param user: if not None, restrict to comments written by this user
        :return: list of aiida Comment objects
        """
        from aiida.backends.djsite.db.models import DbComment
        import operator
        from django.db.models import Q
        query_list = []
        # If an id is specified then we add it to the query
        if comment_id is not None:
            query_list.append(Q(pk=comment_id))
        # If a user is specified then we add it to the query
        if user is not None:
            query_list.append(Q(user=user))
        # NOTE(review): ``reduce`` here is the Python-2 builtin (this module
        # is py2-era code, cf. ``basestring``/``unicode`` elsewhere).  When
        # *both* filters are omitted query_list is empty and reduce() raises
        # TypeError -- confirm callers always pass at least one filter.
        dbcomments = DbComment.objects.filter(
            reduce(operator.and_, query_list))
        comments = []
        from aiida.orm.implementation.django.comment import Comment
        for dbcomment in dbcomments:
            comments.append(Comment(dbcomment=dbcomment))
        return comments
def get_comments(self, pk=None):
from aiida.backends.djsite.db.models import DbComment
if pk is not None:
try:
correct = all([isinstance(_, int) for _ in pk])
if not correct:
raise ValueError('pk must be an integer or a list of integers')
except TypeError:
if not isinstance(pk, int):
raise ValueError('pk must be an integer or a list of integers')
return list(DbComment.objects.filter(
dbnode=self._dbnode, pk=pk).order_by('pk').values(
'pk', 'user__email', 'ctime', 'mtime', 'content'))
return list(DbComment.objects.filter(dbnode=self._dbnode).order_by(
'pk').values('pk', 'user__email', 'ctime', 'mtime', 'content'))
def _get_dbcomments(self, pk=None):
from aiida.backends.djsite.db.models import DbComment
if pk is not None:
try:
correct = all([isinstance(_, int) for _ in pk])
if not correct:
raise ValueError('pk must be an integer or a list of integers')
return list(DbComment.objects.filter(dbnode=self._dbnode, pk__in=pk).order_by('pk'))
except TypeError:
if not isinstance(pk, int):
raise ValueError('pk must be an integer or a list of integers')
return list(DbComment.objects.filter(dbnode=self._dbnode, pk=pk).order_by('pk'))
return list(DbComment.objects.filter(dbnode=self._dbnode).order_by('pk'))
def _update_comment(self, new_field, comment_pk, user):
from aiida.backends.djsite.db.models import DbComment
comment = list(DbComment.objects.filter(dbnode=self._dbnode,
pk=comment_pk, user=user))[0]
if not isinstance(new_field, basestring):
raise ValueError("Non string comments are not accepted")
if not comment:
raise NotExistent("Found no comment for user {} and pk {}".format(
user, comment_pk))
comment.content = new_field
comment.save()
    def _remove_comment(self, comment_pk, user):
        """Delete the comment with primary key *comment_pk* from this node.

        NOTE(review): the *user* argument is accepted but never used in the
        filter, and indexing ``[0]`` raises IndexError (not NotExistent)
        when no such comment exists -- confirm both are intentional.
        """
        from aiida.backends.djsite.db.models import DbComment
        comment = DbComment.objects.filter(dbnode=self._dbnode, pk=comment_pk)[0]
        comment.delete()
def _increment_version_number_db(self):
from aiida.backends.djsite.db.models import DbNode
# I increment the node number using a filter
self._dbnode.nodeversion = F('nodeversion') + 1
self._dbnode.save()
# This reload internally the node of self._dbnode
# Note: I have to reload the object (to have the right values in memory,
# otherwise I only get the Django Field F object as a result!
self._dbnode = DbNode.objects.get(pk=self._dbnode.pk)
@property
def uuid(self):
return unicode(self._dbnode.uuid)
@property
def id(self):
return self._dbnode.id
@property
def process_type(self):
return self._dbnode.process_type
@property
def dbnode(self):
# I also update the internal _dbnode variable, if it was saved
# from aiida.backends.djsite.db.models import DbNode
# if self.is_stored:
# self._dbnode = DbNode.objects.get(pk=self._dbnode.pk)
return self._dbnode
def _db_store_all(self, with_transaction=True, use_cache=None):
"""
Store the node, together with all input links, if cached, and also the
linked nodes, if they were not stored yet.
:parameter with_transaction: if False, no transaction is used. This
is meant to be used ONLY if the outer calling function has already
a transaction open!
"""
from django.db import transaction
from aiida.common.utils import EmptyContextManager
if with_transaction:
context_man = transaction.atomic()
else:
context_man = EmptyContextManager()
with context_man:
# Always without transaction: either it is the context_man here,
# or it is managed outside
self._store_input_nodes()
self.store(with_transaction=False, use_cache=use_cache)
self._store_cached_input_links(with_transaction=False)
return self
def get_user(self):
return self._backend.users.from_dbmodel(self._dbnode.user)
def set_user(self, user):
type_check(user, users.DjangoUser)
self._dbnode.user = user.dbuser
def _store_cached_input_links(self, with_transaction=True):
"""
Store all input links that are in the local cache, transferring them
to the DB.
:note: This can be called only if all parents are already stored.
:note: Links are stored only after the input nodes are stored. Moreover,
link storage is done in a transaction, and if one of the links
cannot be stored, an exception is raised and *all* links will remain
in the cache.
:note: This function can be called only after the node is stored.
After that, it can be called multiple times, and nothing will be
executed if no links are still in the cache.
:parameter with_transaction: if False, no transaction is used. This
is meant to be used ONLY if the outer calling function has already
a transaction open!
"""
from django.db import transaction
from aiida.common.utils import EmptyContextManager
if with_transaction:
context_man = transaction.atomic()
else:
context_man = EmptyContextManager()
if not self.is_stored:
raise ModificationNotAllowed(
"Node with pk= {} is not stored yet".format(self.pk))
with context_man:
# This raises if there is an unstored node.
self._check_are_parents_stored()
# I have to store only those links where the source is already
# stored
links_to_store = list(self._inputlinks_cache.keys())
for label in links_to_store:
src, link_type = self._inputlinks_cache[label]
self._add_dblink_from(src, label, link_type)
# If everything went smoothly, clear the entries from the cache.
# I do it here because I delete them all at once if no error
# occurred; otherwise, links will not be stored and I
# should not delete them from the cache (but then an exception
# would have been raised, and the following lines are not executed)
self._inputlinks_cache.clear()
def _db_store(self, with_transaction=True):
"""
Store a new node in the DB, also saving its repository directory
and attributes.
After being called attributes cannot be
changed anymore! Instead, extras can be changed only AFTER calling
this store() function.
:note: After successful storage, those links that are in the cache, and
for which also the parent node is already stored, will be
automatically stored. The others will remain unstored.
:parameter with_transaction: if False, no transaction is used. This
is meant to be used ONLY if the outer calling function has already
a transaction open!
:param bool use_cache: Whether I attempt to find an equal node in the DB.
"""
# TODO: This needs to be generalized, allowing for flexible methods
# for storing data and its attributes.
from django.db import transaction
from aiida.common.utils import EmptyContextManager
from aiida.common.exceptions import ValidationError
from aiida.backends.djsite.db.models import DbAttribute
import aiida.orm.autogroup
if with_transaction:
context_man = transaction.atomic()
else:
context_man = EmptyContextManager()
# I save the corresponding django entry
# I set the folder
# NOTE: I first store the files, then only if this is successful,
# I store the DB entry. In this way,
# I assume that if a node exists in the DB, its folder is in place.
# On the other hand, periodically the user might need to run some
# bookkeeping utility to check for lone folders.
self._repository_folder.replace_with_folder(
self._get_temp_folder().abspath, move=True, overwrite=True)
# I do the transaction only during storage on DB to avoid timeout
# problems, especially with SQLite
try:
with context_man:
# Save the row
self._dbnode.save()
# Save its attributes 'manually' without incrementing
# the version for each add.
DbAttribute.reset_values_for_node(self._dbnode,
attributes=self._attrs_cache,
with_transaction=False)
# This should not be used anymore: I delete it to
# possibly free memory
del self._attrs_cache
self._temp_folder = None
self._to_be_stored = False
# Here, I store those links that were in the cache and
# that are between stored nodes.
self._store_cached_input_links()
# This is one of the few cases where it is ok to do a 'global'
# except, also because I am re-raising the exception
except:
# I put back the files in the sandbox folder since the
# transaction did not succeed
self._get_temp_folder().replace_with_folder(
self._repository_folder.abspath, move=True, overwrite=True)
raise
from aiida.backends.djsite.db.models import DbExtra
# I store the hash without cleaning and without incrementing the nodeversion number
DbExtra.set_value_for_node(self._dbnode, _HASH_EXTRA_KEY, self.get_hash())
return self
| 41.35119 | 126 | 0.60393 |
aceb9c3f438c7e2f3d01a8c31683d48296654694 | 2,377 | py | Python | designate-8.0.0/designate/backend/impl_pdns4.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | designate-8.0.0/designate/backend/impl_pdns4.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | designate-8.0.0/designate/backend/impl_pdns4.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
import requests
from oslo_log import log as logging
from oslo_config import cfg
from designate import exceptions
from designate.backend import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PDNS4Backend(base.Backend):
    """Designate backend driver that manages slave zones through the
    PowerDNS v4 HTTP API."""

    __plugin_name__ = 'pdns4'
    __backend_status__ = 'release-compatible'

    def __init__(self, target):
        super(PDNS4Backend, self).__init__(target)
        # PowerDNS endpoint and API key come from the pool target options.
        self.api_endpoint = self.options.get('api_endpoint')
        self.api_token = self.options.get('api_token')

    def _build_url(self, zone=''):
        """Build the PowerDNS zones URL; append *zone* when one is given."""
        parsed = urllib.parse.urlparse(self.api_endpoint)
        separator = '/' if zone else ''
        return "%s://%s/api/v1/servers/localhost/zones%s%s" % (
            parsed.scheme, parsed.netloc, separator, zone)

    def create_zone(self, context, zone):
        """Create a DNS zone"""
        payload = {
            "name": zone.name,
            "kind": "slave",
            "masters": ['%s:%d' % (master.host, master.port)
                        for master in self.masters],
        }
        try:
            response = requests.post(
                self._build_url(),
                json=payload,
                headers={"X-API-Key": self.api_token},
            )
            response.raise_for_status()
        except requests.HTTPError as e:
            raise exceptions.Backend(e)

    def delete_zone(self, context, zone):
        """Delete a DNS zone"""
        try:
            response = requests.delete(
                self._build_url(zone.name),
                headers={"X-API-Key": self.api_token},
            )
            response.raise_for_status()
        except requests.HTTPError as e:
            raise exceptions.Backend(e)
| 28.638554 | 77 | 0.620951 |
aceb9c55f32729790535d6ded82481b39f83e595 | 1,590 | py | Python | src/ecogenie/.ipynb_checkpoints/model-checkpoint.py | PeetsB/ecogenie | 1ad24600b537e62e5fa2495fb7e6b1ba22439653 | [
"MIT"
] | null | null | null | src/ecogenie/.ipynb_checkpoints/model-checkpoint.py | PeetsB/ecogenie | 1ad24600b537e62e5fa2495fb7e6b1ba22439653 | [
"MIT"
] | null | null | null | src/ecogenie/.ipynb_checkpoints/model-checkpoint.py | PeetsB/ecogenie | 1ad24600b537e62e5fa2495fb7e6b1ba22439653 | [
"MIT"
] | null | null | null | import os
from os import path
import pandas as pd
import numpy as np
def check_dirs(working_directories):
    """Resolve the project root relative to the current working directory and
    verify that every directory named in ``working_directories['critical_dirs']``
    exists beneath it, recording results back into *working_directories*.
    """
    # NOTE(review): os.path.realpath('__file__') resolves the *literal string*
    # '__file__' against the CWD (a notebook-friendly trick); it does NOT use
    # this module's real __file__ attribute -- confirm this is intentional.
    notebooks_dir = os.path.dirname(os.path.realpath('__file__'))
    # Project root is assumed to be one level above the notebooks directory.
    root_dir = path.abspath(path.join(notebooks_dir, '..'))
    critical_dirs = working_directories['critical_dirs'].values.tolist()
    list_and_check_dirs(root_dir, critical_dirs, working_directories)
# from: https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
def list_and_check_dirs(startpath, critical_dirs, working_directories):
    """Walk *startpath*, record the absolute path of each directory whose
    basename appears in *critical_dirs*, and report any that are missing.

    :param startpath: root directory to walk
    :param critical_dirs: iterable of directory basenames that must exist
    :param working_directories: DataFrame with columns ``critical_dirs`` and
        ``absolute_path``; matched rows get ``absolute_path`` filled in,
        unmatched rows end up as NaN (mutated in place)
    """
    for root, dirs, files in os.walk(startpath):
        # Record the full path of any directory whose name is critical.
        # (The old tree-drawing helpers ``level``/``indent``/``subindent``
        # were never used and have been removed.)
        if os.path.basename(root) in critical_dirs:
            working_directories.loc[
                working_directories['critical_dirs'] == os.path.basename(root),
                'absolute_path'] = os.path.dirname(root) + '/' + os.path.basename(root)
    # Ensure that empty fields are set to NaN so they register as missing.
    # FIX: assign the result instead of ``inplace=True`` on a column
    # selection, which silently stops writing back under pandas copy-on-write.
    working_directories['absolute_path'] = \
        working_directories['absolute_path'].replace('', np.nan)
    # FIX: compute missing rows from the 'absolute_path' column directly;
    # the old np.where(pd.isnull(df)) scanned every column of the frame.
    missing_dirs = working_directories.index[
        working_directories['absolute_path'].isnull()].tolist()
    for i in missing_dirs:
        print('{} {}/'.format('Missing critical directory: ', working_directories.loc[i, 'critical_dirs']))
    if not missing_dirs:
        print('All critical directories exist and are set in data frame: working_directories')
| 46.764706 | 106 | 0.696855 |
aceb9d5de85ce4cc10e6243e09acfeb729808963 | 2,954 | py | Python | sqllineage/core/handlers/source.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | sqllineage/core/handlers/source.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | sqllineage/core/handlers/source.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | import re
from typing import Union
from sqlparse.sql import Function, Identifier, IdentifierList, Parenthesis, Token
from sqllineage.core.handlers.base import NextTokenBaseHandler
from sqllineage.exceptions import SQLLineageException
from sqllineage.holders import SubQueryLineageHolder
from sqllineage.models import SubQuery, Table
class SourceHandler(NextTokenBaseHandler):
    """Handler fired after a FROM/JOIN keyword: records the statement's
    source tables (or subqueries) into the lineage holder."""
    # Regexes for keywords that introduce a source table; the JOIN pattern
    # covers the LEFT/RIGHT/FULL x INNER/OUTER/STRAIGHT and CROSS/NATURAL
    # variants.
    SOURCE_TABLE_TOKENS = (
        r"FROM",
        # inspired by https://github.com/andialbrecht/sqlparse/blob/master/sqlparse/keywords.py
        r"((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN",
    )
    def _indicate(self, token: Token) -> bool:
        """Return True when *token* is a FROM/JOIN keyword that actually
        introduces a source table (excluding e.g. the FROM inside trim())."""
        # SELECT trim(BOTH ' ' FROM ' abc '); Here FROM is not a source table flag
        return any(
            re.match(regex, token.normalized) for regex in self.SOURCE_TABLE_TOKENS
        ) and not isinstance(token.parent.parent, Function)
    def _handle(self, token: Token, holder: SubQueryLineageHolder) -> None:
        """Record the table/subquery denoted by *token* as a read source.

        :raises SQLLineageException: if the token is not one of the
            recognized Identifier/IdentifierList/Parenthesis shapes
        """
        if isinstance(token, Identifier):
            if isinstance(token.token_first(skip_cm=True), Parenthesis):
                # SELECT col1 FROM (SELECT col2 FROM tab1) dt, the subquery will be parsed as Identifier
                # and this Identifier's get_real_name method would return alias name dt
                # referring https://github.com/andialbrecht/sqlparse/issues/218 for further information
                holder.add_read(
                    SubQuery.of(token.token_first(skip_cm=True), token.get_real_name())
                )
            else:
                holder.add_read(self._get_table_or_subquery(token, holder))
        elif isinstance(token, IdentifierList):
            # This is to support join in ANSI-89 syntax
            for token in token.tokens:
                # when real name and alias name are the same, it means subquery here
                if (
                    isinstance(token, Identifier)
                    and token.get_real_name() != token.get_alias()
                ):
                    holder.add_read(self._get_table_or_subquery(token, holder))
        elif isinstance(token, Parenthesis):
            # SELECT col1 FROM (SELECT col2 FROM tab1), the subquery will be parsed as Parenthesis
            # This syntax without alias for subquery is invalid in MySQL, while valid for SparkSQL
            holder.add_read(SubQuery.of(token, None))
        else:
            raise SQLLineageException(
                "An Identifier is expected, got %s[value: %s] instead."
                % (type(token).__name__, token)
            )
    @classmethod
    def _get_table_or_subquery(
        cls, token: Identifier, holder: SubQueryLineageHolder
    ) -> Union[SubQuery, Table]:
        """Resolve *token* to a CTE (if its name matches one in the holder),
        otherwise to a plain Table; dotted names are always plain tables."""
        cte_dict = {s.alias: s for s in holder.cte}
        return (
            Table.of(token)
            if "." in token.value
            else cte_dict.get(token.get_real_name(), Table.of(token))
        )
| 45.446154 | 104 | 0.635071 |
aceb9dda2b6b9a82f21064ab67f8c6dd78d16e7a | 109 | py | Python | first/app/useCases/services/interfaces/userCases.py | OscarSilvaOfficial/Ifood-Challenges | d97290b26ca4dec62e92823fe2c6e27a9e4c8248 | [
"MIT"
] | null | null | null | first/app/useCases/services/interfaces/userCases.py | OscarSilvaOfficial/Ifood-Challenges | d97290b26ca4dec62e92823fe2c6e27a9e4c8248 | [
"MIT"
] | null | null | null | first/app/useCases/services/interfaces/userCases.py | OscarSilvaOfficial/Ifood-Challenges | d97290b26ca4dec62e92823fe2c6e27a9e4c8248 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class UseCases(ABC):
    """Abstract interface for use-case services.

    Concrete services must provide :meth:`get`; instantiating this class
    directly raises ``TypeError``.
    """

    @abstractmethod
    def get(self, id):
        """Fetch an entity by its identifier ``id``."""
| 13.625 | 35 | 0.697248 |
aceb9e9892c7c0265ae22a3f33d0d3d7305cc4fc | 1,182 | py | Python | setup.py | TTitcombe/python-obfuscator | cdc766d3449e749176d09d40cee4beb92aced6b2 | [
"MIT"
] | 61 | 2020-12-06T15:36:47.000Z | 2022-03-28T16:52:08.000Z | setup.py | TTitcombe/python-obfuscator | cdc766d3449e749176d09d40cee4beb92aced6b2 | [
"MIT"
] | 11 | 2020-12-25T21:46:05.000Z | 2022-03-03T15:48:21.000Z | setup.py | TTitcombe/python-obfuscator | cdc766d3449e749176d09d40cee4beb92aced6b2 | [
"MIT"
] | 19 | 2020-12-25T17:02:16.000Z | 2022-03-27T14:05:31.000Z | from distutils.core import setup
import os.path
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="python_obfuscator",
packages=setuptools.find_packages(),
version="0.0.2",
license="MIT",
description="It's a python obfuscator.",
author="David Teather",
author_email="contact.davidteather@gmail.com",
url="https://github.com/davidteather/python-obfuscator",
long_description=long_description,
long_description_content_type="text/markdown",
download_url="https://github.com/davidteather/python-obfuscator/tarball/master",
keywords=["obfuscator"],
install_requires=["regex"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
entry_points={"console_scripts": ["pyobfuscate=python_obfuscator.cli:cli"]},
)
| 34.764706 | 84 | 0.667513 |
aceb9f33a826044fa4f17fc7e93b9a0fdfd65af3 | 923 | py | Python | PrivateMonitorClient/src/CronTools/CronBackup.py | ajax-2/PrivateMonitor | 9ab79ad08ba820bdb4c292676299bdeb2af7f725 | [
"Apache-2.0"
] | null | null | null | PrivateMonitorClient/src/CronTools/CronBackup.py | ajax-2/PrivateMonitor | 9ab79ad08ba820bdb4c292676299bdeb2af7f725 | [
"Apache-2.0"
] | null | null | null | PrivateMonitorClient/src/CronTools/CronBackup.py | ajax-2/PrivateMonitor | 9ab79ad08ba820bdb4c292676299bdeb2af7f725 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import multiprocessing
import time
import os
from src.info.LocalInfo import LocalInfo
import threading
li = LocalInfo()
# 备份数据
class CronBackup(object):
# 审计备份
@staticmethod
def audit_backup():
if not os.path.isdir("/home/.audit"):
os.mkdir("/home/.audit")
for num in range(10, 0, -1):
if os.path.isfile("/home/.audit/audit.%s" % (num - 1)):
os.system("mv -f /home/.audit/audit.%s /home/.audit/audit.%s" % (num - 1, num))
os.system("tar -zcf /home/.audit/audit.0 %s" % li.audit_log_dir)
# 开始备份
@staticmethod
def exec_backup():
while True:
t1 = threading.Thread(target=CronBackup.audit_backup)
t1.start()
t1.join()
time.sleep(li.backup_time)
# 入口
@staticmethod
def start():
multiprocessing.Process(target=CronBackup.exec_backup).start()
| 24.289474 | 95 | 0.590466 |
aceb9fd33227ce0f5bbec37e70caa6576e0f984f | 3,767 | py | Python | tfdet/model/postprocess/yolo.py | Burf/tfdetection | 658e67d6db71e04bda2965d5a5d506d304ab8ad6 | [
"Apache-2.0"
] | null | null | null | tfdet/model/postprocess/yolo.py | Burf/tfdetection | 658e67d6db71e04bda2965d5a5d506d304ab8ad6 | [
"Apache-2.0"
] | null | null | null | tfdet/model/postprocess/yolo.py | Burf/tfdetection | 658e67d6db71e04bda2965d5a5d506d304ab8ad6 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tfdet.core.bbox import yolo2bbox
from tfdet.core.util import map_fn
def filter_detection(score, logit, regress, anchors, proposal_count = 100, iou_threshold = 0.3, score_threshold = 0.7, soft_nms = False, clip_ratio = 16 / 1000):
    """Per-sample YOLO post-processing: score filtering plus per-class NMS.

    Operates on a single sample (no batch axis) — ``FilterDetection`` below
    maps it over the batch with ``map_fn``.

    Parameters
    ----------
    score : objectness score per anchor, multiplied into ``logit``.
    logit : per-anchor class scores; class index 0 is treated as background.
    regress : per-anchor box regression deltas, decoded via ``yolo2bbox``.
    anchors : anchor boxes aligned row-for-row with ``regress``.
    proposal_count : fixed number of detections returned (zero-padded).
    iou_threshold : IoU threshold used by non-max suppression.
    score_threshold : minimum combined class score to keep an anchor.
    soft_nms : bool (True selects sigma 0.5) or a float Soft-NMS sigma.
    clip_ratio : forwarded to ``yolo2bbox`` when decoding boxes.

    Returns
    -------
    logit : tensor of shape [proposal_count, n_class] with class scores.
    proposal : tensor of shape [proposal_count, 4] with x1y1x2y2 boxes
        clipped to [0, 1].
    """
    n_class = tf.keras.backend.int_shape(logit)[-1]
    # Fold the objectness score into the per-class scores.
    logit = score * logit
    score = tf.reduce_max(logit, axis = -1, keepdims = True)
    label = tf.argmax(logit, axis = -1)
    # Keep anchors above the score threshold whose best class is not the
    # background class (label 0).
    valid_indices = tf.where(tf.logical_and(score_threshold <= score, tf.expand_dims(0 < label, axis = -1)))[:, 0]
    logit = tf.gather(logit, valid_indices)
    regress = tf.gather(regress, valid_indices)
    anchors = tf.gather(anchors, valid_indices)
    # Decode regression deltas into boxes and clip them to [0, 1].
    regress = yolo2bbox(anchors, regress, clip_ratio)
    regress = tf.clip_by_value(regress, 0, 1)
    # tf.image NMS expects y1x1y2x2 ordering, so swap the coordinates.
    x1, y1, x2, y2 = tf.split(regress, 4, axis = -1)
    transfom_regress = tf.concat([y1, x1, y2, x2], axis = -1)
    # A boolean soft_nms picks the default sigma (0.5 when True, 0. — i.e.
    # hard NMS — when False); a float value is used directly as the sigma.
    soft_nms_sigma = soft_nms
    if not isinstance(soft_nms, float):
        soft_nms_sigma = 0.5 if soft_nms else 0.
    def _yolo_detection(class_score, label):
        # Run (soft-)NMS for one class and pair the kept anchor indices
        # with their class label.
        indices = tf.image.non_max_suppression_with_scores(transfom_regress, class_score, max_output_size = proposal_count, iou_threshold = iou_threshold, soft_nms_sigma = soft_nms_sigma)[0]
        label = tf.gather(label, indices)
        indices = tf.stack([tf.cast(indices, label.dtype), label], axis = -1)
        return indices
    # Per-class NMS, skipping the background class 0.
    indices = []
    for c in range(1, n_class):
        class_score = logit[..., c]
        labels = c * tf.ones([tf.shape(class_score)[0]], dtype = tf.int64)
        indices.append(_yolo_detection(class_score, labels))
    indices = tf.concat(indices, axis = 0)
    # Rank the surviving (anchor, class) pairs by score and keep the best.
    class_score = tf.gather_nd(logit, indices)
    top_indices = tf.nn.top_k(class_score, tf.minimum(proposal_count, tf.shape(class_score)[0]), sorted = True).indices
    indices = tf.gather(indices[:, 0], top_indices)
    logit = tf.gather(logit, indices)
    proposal = tf.gather(regress, indices)
    # Zero-pad so the output shapes are static: [proposal_count, ...].
    pad_count = tf.maximum(proposal_count - tf.shape(proposal)[0], 0)
    logit = tf.pad(logit, [[0, pad_count], [0, 0]])
    proposal = tf.pad(proposal, [[0, pad_count], [0, 0]])
    logit = tf.reshape(logit, [proposal_count, n_class])
    proposal = tf.reshape(proposal, [proposal_count, 4])
    return logit, proposal
class FilterDetection(tf.keras.layers.Layer):
    """Keras layer applying ``filter_detection`` to every sample in a batch,
    yielding a fixed number of scored box proposals per image.
    """
    def __init__(self, proposal_count = 100, iou_threshold = 0.3, score_threshold = 0.7, soft_nms = False, batch_size = 1, clip_ratio = 16 / 1000, **kwargs):
        super(FilterDetection, self).__init__(**kwargs)
        # Hyper-parameters are kept as attributes so get_config() can
        # serialize the layer.
        self.proposal_count = proposal_count
        self.iou_threshold = iou_threshold
        self.score_threshold = score_threshold
        self.soft_nms = soft_nms
        self.batch_size = batch_size
        self.clip_ratio = clip_ratio

    def call(self, inputs):
        score, logits, regress, anchors = inputs
        # Broadcast the shared anchor set across the batch dimension.
        batched_anchors = tf.tile(tf.expand_dims(anchors, axis = 0),
                                  [tf.shape(logits)[0], 1, 1])
        return map_fn(filter_detection, score, logits, regress, batched_anchors,
                      dtype = (logits.dtype, regress.dtype),
                      batch_size = self.batch_size,
                      proposal_count = self.proposal_count,
                      iou_threshold = self.iou_threshold,
                      score_threshold = self.score_threshold,
                      soft_nms = self.soft_nms,
                      clip_ratio = self.clip_ratio)

    def get_config(self):
        config = super(FilterDetection, self).get_config()
        config.update({"proposal_count": self.proposal_count,
                       "iou_threshold": self.iou_threshold,
                       "score_threshold": self.score_threshold,
                       "soft_nms": self.soft_nms,
                       "batch_size": self.batch_size,
                       "clip_ratio": self.clip_ratio})
        return config
| 48.922078 | 190 | 0.673746 |
aceb9fd573a8e21d61cb472a0284239f4b1f739f | 25,646 | py | Python | fyrd/batch_systems/lsf.py | mdguerreiro/fyrd | 0bdd55d17d4a044edce793ca3179fbba89b68afa | [
"MIT"
] | 1 | 2020-04-17T01:05:24.000Z | 2020-04-17T01:05:24.000Z | fyrd/batch_systems/lsf.py | mdguerreiro/fyrd | 0bdd55d17d4a044edce793ca3179fbba89b68afa | [
"MIT"
] | null | null | null | fyrd/batch_systems/lsf.py | mdguerreiro/fyrd | 0bdd55d17d4a044edce793ca3179fbba89b68afa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
LSF parsing functions.
"""
import os as _os
import re as _re
import sys as _sys
import pwd as _pwd # Used to get usernames for queue
import datetime as _dt
from six import text_type as _txt
from six import string_types as _str
from six import integer_types as _int
import Pyro4
from .. import run as _run
from .. import conf as _conf
from .. import logme as _logme
from .. import ClusterError as _ClusterError
from .. import script_runners as _scrpts
from .. import submission_scripts as _sscrpt
from .base import BatchSystemClient, BatchSystemServer
# Short alias for the helper class that wraps submission scripts.
_Script = _sscrpt.Script
# File-name suffix appended to generated LSF submission scripts.
SUFFIX = 'lsf'
# Define lsf-to-slurm mappings: raw LSF job states (the bjobs STAT column)
# mapped onto the normalized state names shared by all fyrd backends.
LSF_SLURM_STATES = {
    'PEND': 'pending', # PEND: Pending
    'PROV': 'pending', # PROV: Proven state
    'PSUSP': 'suspended', # PSUSP: Suspended (pending)
    'RUN': 'running', # RUN: Running
    'USUSP': 'suspended', # USUSP: Suspended while running
    'SSUSP': 'suspended', # SSUSP: Suspended by LSF
    'DONE': 'completed', # DONE: Done finished
    'EXIT': 'failed', # EXIT: Failure (exit code != 0)
    'UNKWN': 'failed', # UNKWN: Unknown
    'WAIT': 'held', # WAIT: Waiting for others (chunk job queue)
    'ZOMBI': 'killed' # ZOMBI: In zombi state
}
@Pyro4.expose
class LSFServer(BatchSystemServer):
    """Server side of the LSF batch-system backend.

    Shells out to the LSF command line tools (``bjobs``, ``bsub``,
    ``bkill``) and normalizes their output to the SLURM-style vocabulary
    shared by all fyrd backends.
    """

    NAME = 'lsf'

    def metrics(self, job_id=None):
        """Iterator to get metrics from LSF system.

        Parameters
        ----------
        job_id: str, optional
            Job ID to filter the queue with

        Yields
        ------
        line : tuple
            One tuple of normalized metric fields per job.
        """
        _logme.log('Getting job metrics', 'debug')

        # No data for the following tags in LSF:
        # 'AveCPUFreq', 'AveDiskRead', 'AveDiskWrite', 'ConsumedEnergy'
        # So we fill them with None.
        # Use 'exit_code' as None flag
        none = 'user'

        fwdth = 400  # Used for fixed-width parsing of bjobs
        fields = (
            'jobid', 'queue', 'exec_host', 'nexec_host', 'effective_resreq',
            none, none, none, 'mem', none,
            'submit_time', 'start_time', 'finish_time', 'run_time'
        )
        # Indices of the fields that carry absolute / elapsed times.
        time_tags = [10, 11, 12]
        elapsed_tag = 13
        flen = len(fields)

        # Arguments used for bjobs:
        #   - noheader: remove header from 1st row
        #   - a: show jobs in all states
        #   - X: display uncondensed output for hosts
        #   - o: customized output formats
        #   - jobid: get metrics only for that job
        #
        qargs = [
            'bjobs', '-noheader', '-a', '-X', '-o',
            '"{}"'.format(' '.join(['{0}:{1}'.format(field, fwdth)
                                    for field in fields]))
        ]
        if job_id:
            qargs.append(' {}'.format(job_id))

        try:
            # BUGFIX: skip empty lines ('if k', as queue_parser already
            # does) — bjobs output ends with a newline, and an all-empty
            # row would crash normalize_time() below with AttributeError.
            bjobs = [
                [k[i:i+fwdth].strip() if field != none else "Not Available"
                 for i, field in zip(range(0, fwdth*flen, fwdth), fields)]
                for k in _run.cmd(qargs)[1].split('\n') if k
            ]
        except Exception as e:
            _logme.log('Error running bjobs to get the metrics: {}'
                       .format(str(e)), 'error')
            bjobs = []

        # Normalize Time Tags from LSF format to SLURM format
        for idx, job in enumerate(bjobs):
            for tag in time_tags:
                job[tag] = self.normalize_time(job[tag])
            job[elapsed_tag] = self.normalize_elapsed(job[elapsed_tag])
            bjobs[idx] = tuple(job)

        # Yield metric iterator
        for line in bjobs:
            yield line

    ###########################################################################
    #                            Functionality Test                           #
    ###########################################################################

    def queue_test(self, warn=True):
        """Check that LSF can be used.

        Just looks for bsub and bjobs.

        Parameters
        ----------
        warn : bool
            log a warning on fail

        Returns
        -------
        batch_system_functional : bool
        """
        log_level = 'error' if warn else 'debug'
        bsub = _conf.get_option('queue', 'bsub')
        # A configured absolute/relative path must actually be executable.
        if (bsub is not None and _os.path.dirname(bsub)
                and not _run.is_exe(bsub)):
            _logme.log(
                # BUGFIX: message said "sbatch" (copy-paste from the Slurm
                # backend); this backend checks bsub.
                'Cannot use LSF as bsub path set in conf to {0}'
                .format(bsub) + ' but that path is not an executable',
                log_level
            )
            return False
        bsub = bsub if bsub else 'bsub'
        # Resolve bare command names through $PATH.
        bsub = _run.which(bsub) if not _os.path.dirname(bsub) else bsub
        if not bsub:
            _logme.log(
                'Cannot use LSF as cannot find bsub', log_level
            )
            return False
        qpath = _os.path.dirname(bsub)
        # bjobs is assumed to live next to bsub.
        bjobs = _os.path.join(qpath, 'bjobs')
        return _run.is_exe(bjobs)

    ###########################################################################
    #                         Normalization Functions                         #
    ###########################################################################

    def normalize_job_id(self, job_id):
        """Convert the job id into (job_id, array_id).

        LSF array members look like ``1234[1]``; plain jobs are ``1234``.
        ``array_id`` is None when the job is not part of an array.
        """
        jobquery = _re.compile(r'(\d+)(\[(\d+)\])?')
        job_id, _, array_id = jobquery.match(job_id).groups()
        return job_id, array_id

    def normalize_state(self, state):
        """Map a raw LSF state onto the standardized state vocabulary."""
        if state.upper() in LSF_SLURM_STATES:
            state = LSF_SLURM_STATES[state.upper()]
        return state

    def normalize_time(self, lsf_time):
        """Convert LSF time into standardized (SLURM style) time.

        LSF format:
            * Aug 18 14:31 [E|L|X]   (MMM DD HH:MM)
              [E|L|X] Job estimation modifiers
            - A dash indicates that the pending, suspended, or job with no
              run limit has no estimated finish time.
        SLURM format:
            * 2019-08-18T14:31:00    (YYYY-MM-DDTHH:MM:SS)
        """
        # LSF and SLURM formats
        LSF_FORMAT = "%b %d %H:%M"
        SLURM_FORMAT = "%Y-%m-%dT%H:%M:%S"

        # Dash indicates no estimated finish time
        if lsf_time == '-':
            return 'None'

        # Remove optional job estimation modifiers [ELX]
        # NOTE(review): assumes the input matches the bjobs time format; an
        # unexpected string would raise AttributeError here — confirm
        # upstream always passes bjobs time columns.
        timequery = _re.compile(r'^([^ELX]+)( [ELX])?$')
        lsf_time, est_mod = timequery.match(lsf_time).groups()

        # Convert to LSF time
        date = _dt.datetime.strptime(lsf_time, LSF_FORMAT)

        # Year is not included in LSF time! include current Year
        date = date.replace(year=_dt.datetime.now().year)

        return date.strftime(SLURM_FORMAT)

    def normalize_elapsed(self, lsf_elapsed):
        """Convert LSF elapsed time into standardized (SLURM style) time.

        LSF format:
            * 122 second(s)
        SLURM format:
            * 02:02   [DD-[HH:]]MM:SS
        """
        # Dash indicates no elapsed time (job not started)
        if lsf_elapsed == '-':
            return '00:00'

        # Parse seconds from LSF format
        elapsedquery = _re.compile(r'^(\d+) second\(s\)$')
        lsf_elapsed = int(elapsedquery.match(lsf_elapsed).groups()[0])

        # Convert to SLURM elapsed format
        elapsed = _dt.timedelta(seconds=lsf_elapsed)
        days = elapsed.days
        hours, remainder = divmod(elapsed.seconds, 3600)
        # BUGFIX: the remainder must be split with divmod(remainder, 60);
        # the previous divmod(remainder, 3600) produced values such as
        # '00:122' for 122 seconds instead of the documented '02:02'.
        minutes, seconds = divmod(remainder, 60)
        s = ("{:02d}-".format(days) if days else "") + \
            ("{:02d}:".format(hours) if hours else "") + \
            "{:02d}:{:02d}".format(minutes, seconds)
        return s

    def normalize_int(self, value):
        """Convert possible int from job info (bjobs) into python3 int.

        It can be '-' or '' if job is still pending or due to an error;
        both cases (and any other non-numeric value) yield None.
        """
        try:
            int_value = int(value) if value else None
        except ValueError:
            int_value = None
        return int_value

    ###########################################################################
    #                             Job Submission                              #
    ###########################################################################

    def gen_scripts(self, job_object, command, args, precmd, modstr):
        """Can't create the scripts in server side since Job and Script
        objects won't be serialized correctly by Pyro4; see
        ``LSFClient.gen_scripts`` for the client-side implementation.

        Parameters
        ---------
        job_object : fyrd.job.Job
        command : str
            Command to execute
        args : list
            List of additional arguments, not used in this script.
        precmd : str
            String from options_to_string() to add at the top of the file,
            should contain batch system directives
        modstr : str
            String to add after precmd, should contain module directives.

        Raises
        ------
        NotImplementedError
            Always; script generation is handled client-side.
        """
        raise NotImplementedError()

    def submit(self, script_file_name, dependencies=None):
        """Submit any file with dependencies to LSF.

        Parameters
        ----------
        script_file_name : str
            Path of the script to be submitted
        dependencies : list
            List of job ids this submission must wait on (combined with
            ``done(...)`` conditions)

        Returns
        -------
        results: dict
            Dictionary containing the results and/or errors.
            If the execution has no errors, it returns the job_id as the
            result.
        """
        _logme.log('Submitting to LSF', 'debug')

        if dependencies:
            deps = '-w "{}"'.format(
                '&&'.join(['done({})'.format(d) for d in dependencies]))
            args = ['bsub', deps, '<', script_file_name]
        else:
            args = ['bsub', '<', script_file_name]

        # Try to submit job 5 times
        code, stdout, stderr = _run.cmd(args, tries=5)
        if code == 0:
            # Job id is returned like this by LSF:
            #   'Job <165793> is submitted to queue <sequential>.'
            jobquery = _re.compile(r'<(\d+)>')
            job_id, _ = self.normalize_job_id(jobquery.findall(stdout)[0])
        else:
            _logme.log('bsub failed with code {}\n'.format(code) +
                       'stdout: {}\nstderr: {}'.format(stdout, stderr),
                       'critical')
            # A dict is returned instead of raising because Pyro4 cannot
            # serialize CalledProcessError across the wire.
            return {'error': True, 'stdout': stdout, 'stderr': stderr}

        return {'error': False, 'result': job_id}

    ###########################################################################
    #                             Job Management                              #
    ###########################################################################

    def kill(self, job_ids):
        """Terminate all jobs in job_ids.

        Parameters
        ----------
        job_ids : list or str
            A list of valid job ids or a single valid job id

        Returns
        -------
        success : bool
        """
        o = _run.cmd('bkill {0}'.format(' '.join(_run.listify(job_ids))),
                     tries=5)
        return o[0] == 0

    ###########################################################################
    #                              Queue Parsing                              #
    ###########################################################################

    def queue_parser(self, user=None, partition=None, job_id=None):
        """Iterator for LSF queues.

        Use the `bjobs -o` command to get standard data across
        implementation. To fully read all jobs (finished and unfinished),
        option `-a` is used. Jobs are retired from the LSF history after
        reaching the interval specified by CLEAN_PERIOD in lsb.params
        (default period is 1 hour).

        Parameters
        ----------
        user : str, optional
            User name to pass to bjobs to filter queue with
        partiton : str, optional
            Partition to filter the queue with
        job_id: str, optional
            Job ID to filter the queue with

        Yields
        ------
        job_id : str
        array_id : str or None
        name : str
        userid : str
        partition : str
        state :str
        nodelist : list
        numnodes : int
        cntpernode : int or None
        exit_code : int or None
        """
        # A non-numeric job_id cannot match anything: drop the filter.
        try:
            if job_id:
                int(job_id)
        except ValueError:
            job_id = None

        fwdth = 400  # Used for fixed-width parsing of bjobs
        fields = [
            'jobid', 'name', 'user', 'queue', 'stat',
            'exec_host', 'nexec_host', 'slots', 'exit_code'
        ]
        flen = len(fields)

        # Arguments used for bjobs:
        #   - noheader: remove header from 1st row
        #   - a: show jobs in all states
        #   - X: display uncondensed output for hosts
        #   - o: customized output formats
        #
        qargs = [
            'bjobs', '-noheader', '-a', '-X', '-o',
            '"{}"'.format(' '.join(['{0}:{1}'.format(field, fwdth)
                                    for field in fields]))
        ]

        #
        # Parse queue info by length
        #   - Each job entry is separated by '\n'
        #   - Each job entry is a tuple with each field value
        #     [ (fld1, fld2, fld3, ...), (...) ]
        #
        # bjobs returns 'No unfinished job found' on stderr when list is
        # empty
        #
        bjobs = [
            tuple(
                [k[i:i+fwdth].strip() for i in range(0, fwdth*flen, fwdth)]
            ) for k in _run.cmd(qargs)[1].split('\n') if k
        ]

        # Sanitize data
        for binfo in bjobs:
            if len(binfo) == len(fields):
                # jobid     -> bid        ($jobid)
                # name      -> bname      ($job_name | $job_name[#array_num])
                # user      -> buser      ($user_name)
                # queue     -> bpartition ($queue)
                # stat      -> bstate     (PEND | RUN | DONE | EXIT...)
                # exec_host -> bndlst     (cpus*nodeid:cpus*nodeid...)
                # nexec_host-> bnodes     ($num_nodes)
                # slots     -> bcpus      ($total_tasks)
                # exit_code -> bcode      ($exit_code)
                [bid, bname, buser, bpartition, bstate,
                 bndlst, bnodes, bcpus, bcode] = binfo
            else:
                _sys.stderr.write('{}'.format(repr(binfo)))
                raise _ClusterError('Queue parsing error, expected {} items '
                                    'in output of bjobs, got {}\n'
                                    .format(len(fields), len(binfo)))

            # If not my partition go to next entry
            if partition and bpartition != partition:
                continue

            # Normalize bid and barr
            if not isinstance(bid, (_str, _txt)):
                bid = str(bid) if bid else None
            bid, barr = self.normalize_job_id(bid)

            # Normalize nodes, cpus, state and exit_code; bjobs reports '-'
            # for values that are not available yet.
            if not isinstance(bnodes, _int):
                bnodes = self.normalize_int(bnodes)
            if not isinstance(bcpus, _int):
                bcpus = self.normalize_int(bcpus)
            if not isinstance(bcode, _int):
                bcode = self.normalize_int(bcode)
            bstate = self.normalize_state(bstate)

            # If user or job id are used to filter skip to next if not found
            if buser.isdigit():
                buser = _pwd.getpwuid(int(buser)).pw_name
            if user and buser != user:
                continue
            if job_id and (job_id != bid):
                continue

            # Attempt to parse nodelist
            # LSF node list: "16*s01r1b14:16*s01r1b12:16*s01r1b08"
            bnodelist = []
            nodequery = _re.compile(r'(\d+\*)?(\w+)')
            if bndlst:
                # [ (cores, node1), (cores, node2), ...]
                if nodequery.search(bndlst):
                    nsplit = nodequery.findall(bndlst)
                    for nrg in nsplit:
                        cores, node = nrg
                        bnodelist.append(node)
                else:
                    bnodelist = bndlst.split(':')

            yield (bid, barr, bname, buser, bpartition, bstate, bnodelist,
                   bnodes, bcpus, bcode)

    def parse_strange_options(self, option_dict):
        """Parse all options that cannot be handled by the regular function.

        Handled on client side; see ``LSFClient.parse_strange_options``.

        Parameters
        ----------
        option_dict : dict
            All keyword arguments passed by the user that are not already
            defined in the Job object

        Raises
        ------
        NotImplementedError
            Always; option parsing is handled client-side.
        """
        raise NotImplementedError
class LSFClient(BatchSystemClient):
    """Client side of the LSF backend.

    Overwrites simple methods that can be executed on localhost, to avoid
    some network overhead.
    """
    NAME = 'lsf'
    PREFIX = '#BSUB'
    PARALLEL = 'mpirun'

    def metrics(self, job_id=None):
        """Fetch job metrics from the remote LSF server."""
        server = self.get_server()
        return server.metrics(job_id=job_id)

    def normalize_job_id(self, job_id):
        """Convert the job id into (job_id, array_id).

        Handled locally to avoid a server round-trip; LSF array members
        look like ``1234[1]``, plain jobs like ``1234``.
        """
        jobquery = _re.compile(r'(\d+)(\[(\d+)\])?')
        job_id, _, array_id = jobquery.match(job_id).groups()
        return job_id, array_id

    def normalize_state(self, state):
        """Map a raw LSF state onto the standardized state vocabulary."""
        if state.upper() in LSF_SLURM_STATES:
            state = LSF_SLURM_STATES[state.upper()]
        return state

    def gen_scripts(self, job_object, command, args, precmd, modstr):
        """Build the submission script objects.

        LSF uses a single combined script, so no separate execution script
        is produced.

        Parameters
        ---------
        job_object : fyrd.job.Job
        command : str
            Command to execute
        args : list
            List of additional arguments, not used in this script.
        precmd : str
            String from options_to_string() to add at the top of the file,
            should contain batch system directives
        modstr : str
            String to add after precmd, should contain module directives.

        Returns
        -------
        fyrd.script_runners.Script
            The submission script
        None
            Placeholder for the execution script (LSF uses a single script)
        """
        scrpt = '{}.{}.{}'.format(job_object.name, job_object.suffix, SUFFIX)

        # Use a single script to run the job and avoid using srun in order
        # to allow sequential and parallel executions to live together in
        # the job.
        # NOTE: the job is initially executed in sequential mode, and the
        # programmer is responsible for calling their parallel codes by
        # means of the self.PARALLEL prefix.
        job_object._mode = 'remote'

        sub_script = _scrpts.CMND_RUNNER_TRACK.format(
            precmd=precmd, usedir=job_object.runpath, name=job_object.name,
            command=command
        )
        job_object._mode = 'local'

        # Create the sub_script Script object
        sub_script_obj = _Script(
            script=sub_script, file_name=scrpt, job=job_object
        )

        return sub_script_obj, None

    def submit(self, script, dependencies=None,
               job=None, args=None, kwds=None):
        """Submit any file with dependencies to LSF.

        Parameters
        ----------
        script : fyrd.Script
            Script to be submitted
        dependencies : list
            List of dependencies
        job : fyrd.job.Job, not implemented
            A job object for the calling job, not used by this function
        args : list, not implemented
            A list of additional command line arguments to pass when
            submitting, not used by this function
        kwds : dict or str, not implemented
            A dictionary of keyword arguments to parse with
            options_to_string, or a string of
            option:value,option,option:value,.... Not used by this function.

        Returns
        -------
        job_id : str
        """
        # The remote submission must see the script in 'remote' mode; the
        # mode is restored afterwards.
        script.job_object._mode = 'remote'
        result = self.get_server().submit(
            script.file_name, dependencies=dependencies
        )
        script.job_object._mode = 'local'
        return result

    def parse_strange_options(self, option_dict):
        """Parse all options that cannot be handled by the regular function.

        Translates node/task/core counts into the LSF directives
        ``-n``, ``-R "span[ptile=...]"`` and ``-R "affinity[core(...)]"``,
        plus ``-x`` (exclusive) and ``-W`` (walltime).

        Parameters
        ----------
        option_dict : dict
            All keyword arguments passed by the user that are not already
            defined in the Job object

        Returns
        -------
        list
            A list of strings to be added at the top of the script file
        dict
            Altered version of option_dict with all options that can't be
            handled by `fyrd.batch_systems.options.option_to_string()`
            removed.
        None
            Would contain additional arguments to pass to bsub, but these
            are not needed so we just return None
        """
        outlist = []

        # Used to figure out tasks_per_node or cpus_per_task if not passed.
        cores_per_node = None
        if 'cores_per_node' in option_dict:
            cores_per_node = int(option_dict.pop('cores_per_node'))

        # Convert nodes to tasks (-n): use nodes and tasks_per_node
        nodes = None
        if 'nodes' in option_dict:
            nodes = int(option_dict.pop('nodes'))
            # 'tasks' option is not compatible with 'nodes'
            if 'tasks' in option_dict:
                option_dict.pop('tasks')

        # Number of tasks (-n)
        tasks = None
        if 'tasks' in option_dict:
            tasks = int(option_dict.pop('tasks'))

        # First look for tasks_per_node, if it's not there change to cores
        # Cores refers to the max number of processors to use per node (ppn)
        if 'tasks_per_node' in option_dict:
            tasks_per_node = int(option_dict.pop('tasks_per_node'))
            if 'cores' in option_dict:
                # Avoid option parser to raise errors
                option_dict.pop('cores')
        elif 'cores' in option_dict:
            tasks_per_node = int(option_dict.pop('cores'))
            if not nodes:
                nodes = 1
        elif cores_per_node:
            tasks_per_node = cores_per_node
        else:
            # BUGFIX: message previously contained garbled quoting
            # ("in job without '' specifying").
            raise _ClusterError("Error parsing LSF options: cannot guess "
                                "'tasks_per_node' for a job that does not "
                                "specify the 'cores_per_node' option.")

        # Use cpus_per_task for SMT affinity (affinity[core(#cores)])
        if 'cpus_per_task' in option_dict:
            cpus_per_task = int(option_dict.pop('cpus_per_task'))
        elif cores_per_node and tasks_per_node:
            cpus_per_task = max(cores_per_node // tasks_per_node, 1)
        else:
            # BUGFIX: same garbled quoting as above.
            raise _ClusterError("Error parsing LSF options: cannot guess "
                                "'cpus_per_task' for a job that does not "
                                "specify the 'cores_per_node' option.")

        # Use the final 'tasks_per_node' to define tasks if 'nodes' given
        if nodes:
            tasks = nodes * tasks_per_node

        # LSF is a bit special for pure threaded jobs, cpus_per_task must
        # be specified then for tasks, span[ptile] args and
        # affinity[core(1)].
        if tasks == 1:
            tasks = cpus_per_task
            tasks_per_node = cpus_per_task
            cpus_per_task = 1

        # Add the LSF parameters that define the job size:
        #   - Number of tasks (-n)
        #   - Tasks per node  (-R span[ptile=#tpn])
        #   - Cpus per task   (-R affinity[core(#cpt)])
        outlist.append('{} -n {}'.format(self.PREFIX, tasks))
        outlist.append('{} -R "span[ptile={}]"'.format(self.PREFIX,
                                                       tasks_per_node))
        outlist.append('{} -R "affinity[core({})]"'.format(self.PREFIX,
                                                           cpus_per_task))

        if 'exclusive' in option_dict:
            option_dict.pop('exclusive')
            outlist.append('{} -x'.format(self.PREFIX))

        # Parse time in LSF no seconds -> [hour:]minutes (remove seconds)
        if 'time' in option_dict:
            time = option_dict.pop('time')
            # NOTE(review): assumes the time string contains digits; a
            # non-matching value would raise here — confirm upstream
            # validates the 'time' option.
            timequery = _re.compile(r'((\d+\:)?(\d+))')
            time = timequery.search(time)[0]
            outlist.append('{} -W {}'.format(self.PREFIX, time))

        # Remove qos option if any
        if 'qos' in option_dict:
            option_dict.pop('qos')

        return outlist, option_dict, None
| 36.532764 | 79 | 0.526632 |
aceba031d933118ed55a055c31dd2f52fb0f310f | 9,464 | py | Python | main_train.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | main_train.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | main_train.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | import argparse
import datetime
import json
import logging
import os
import pprint
import time
from pathlib import Path
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import utils
import utils_train
from hidden_configuration import *
from model.hidden import Hidden
from noise_layers.noiser import Noiser, parse_attack_args
from torch.utils.tensorboard import SummaryWriter
from PIL import PngImagePlugin
# Raise Pillow's PNG text-chunk size limit to 100 MiB so that images with
# very large metadata chunks can still be decoded.
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
# Single device shared by all training / evaluation code in this module.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def get_parser():
    """Build the command-line argument parser for HiDDeN training."""
    p = argparse.ArgumentParser()

    # Data and checkpoint dirs
    p.add_argument('--train_dir', default='/checkpoint/pfz/watermarking/data/train_coco_10k_resized', type=str)
    p.add_argument('--val_dir', default='/checkpoint/pfz/watermarking/data/coco_1k_resized', type=str)
    p.add_argument('--output_dir', default="", type=str, help='Path to save logs and checkpoints.')
    p.add_argument('--saveckp_freq', default=50, type=int)
    p.add_argument('--resume_from', default=None, type=str, help='Checkpoint path to resume from.')

    # Network params
    # p.add_argument('--attack', nargs='*', action=NoiseArgParser, default="")
    p.add_argument('--attacks', default="", type=str)
    p.add_argument('--num_bits', default=30, type=int)

    # Loss params
    p.add_argument('--lambda_dec', default=1, type=float)
    p.add_argument('--lambda_enc', default=0.7, type=float)
    p.add_argument('--lambda_adv', default=1e-3, type=float)

    # Optimization params
    p.add_argument('--epochs', default=200, type=int)
    p.add_argument('--enable_fp16', default=False, choices=[False], type=utils.bool_inst)
    p.add_argument('--batch_size', default=64, type=int)
    p.add_argument('--num_workers', default=8, type=int)

    # Distributed training parameters
    # p.add_argument("--dist_url", default="env://", type=str)
    # p.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    p.add_argument('--debug_slurm', action='store_true')
    p.add_argument('--local_rank', default=-1, type=int)
    p.add_argument('--master_port', default=-1, type=int)
    p.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')

    return p
def train(args):
    """Run distributed HiDDeN training.

    Initializes distributed mode, seeds, data loaders and the HiDDeN +
    noise models, optionally resumes from a checkpoint, then trains for
    ``args.epochs`` epochs, saving a checkpoint and writing logs /
    tensorboard scalars after every epoch.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (see ``get_parser``).
    """
    # Distributed mode
    utils_train.init_distributed_mode(args)
    cudnn.benchmark = True

    # Set seeds for reproductibility
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    np.random.seed(0)

    # Logging
    logging.basicConfig(level=logging.INFO)
    # BUGFIX: log the function's own ``args`` instead of the module-global
    # ``params`` (which only exists when the file is run as a script).
    print("__log__:%s" % json.dumps(vars(args)))
    if utils_train.is_main_process():
        writer = SummaryWriter(os.path.join(args.output_dir, 'tensorboard'))
        args.writer = writer

    # Git SHA
    print("git:{}".format(utils.get_sha()))

    # Preparing data
    train_loader, val_loader = utils_train.get_data_loaders(args.train_dir, args.val_dir, args.batch_size, args.num_workers)

    # Build HiDDeN and Noise model
    hidden_args = {'message_length':args.num_bits,
                   'encoder_blocks':4, 'encoder_channels':64,
                   'decoder_blocks':7, 'decoder_channels':64,
                   'use_discriminator':True,'use_vgg':False,
                   'discriminator_blocks':3, 'discriminator_channels':64,
                   'decoder_loss':args.lambda_dec,
                   'encoder_loss':args.lambda_enc,
                   'adversarial_loss':args.lambda_adv,
                   'enable_fp16':args.enable_fp16
                   }
    attack_config = parse_attack_args(args.attacks)
    hidden_config = HiDDenConfiguration(**hidden_args)
    # Persist the model configuration once per experiment directory.
    args.config_path = os.path.join(args.output_dir, 'config.json')
    if not os.path.exists(args.config_path):
        with open(args.config_path, 'w') as f:
            hidden_args['attacks_arg'] = args.attacks
            json.dump(hidden_args, f)
    attacker = Noiser(attack_config, device)
    hidden_net = Hidden(hidden_config, device, attacker)

    # Optionally resume training: an existing checkpoint in output_dir
    # takes precedence over --resume_from.
    args.checkpoint_path = os.path.join(args.output_dir, "checkpoint.pth")
    if os.path.exists(args.checkpoint_path):
        print('Loading checkpoint from file %s'%args.checkpoint_path)
        checkpoint = torch.load(args.checkpoint_path, map_location="cpu")
        start_epoch = checkpoint['epoch']
        utils.model_from_checkpoint(hidden_net, checkpoint)
    elif args.resume_from is not None:
        print('Loading checkpoint from file %s'%args.resume_from)
        checkpoint = torch.load(args.resume_from, map_location="cpu")
        start_epoch = checkpoint['epoch']
        utils.model_from_checkpoint(hidden_net, checkpoint)
    else:
        start_epoch = 0

    # Distributed training
    hidden_net.encoder_decoder = nn.parallel.DistributedDataParallel(hidden_net.encoder_decoder, device_ids=[args.local_rank])
    hidden_net.discriminator = nn.parallel.DistributedDataParallel(hidden_net.discriminator, device_ids=[args.local_rank])

    # Log model / noise configuration
    print('Model Configuration:\n')
    print(pprint.pformat(vars(hidden_config)))
    print('\nNoise configuration:\n')
    print(str(attack_config))

    # Train
    start_time = time.time()
    print("Starting HiDDeN training !")
    for epoch in range(start_epoch, args.epochs):
        print("========= Epoch %3i ========="%epoch)
        train_loader.sampler.set_epoch(epoch)
        val_loader.sampler.set_epoch(epoch)

        # training one epoch
        train_stats = train_one_epoch(hidden_net, train_loader, epoch, args)
        val_stats = val_one_epoch(hidden_net, val_loader, epoch, args)

        # Save checkpoint (every epoch, plus periodic numbered snapshots).
        save_dict = {
            'enc-dec-model': hidden_net.encoder_decoder.state_dict(),
            'enc-dec-optim': hidden_net.optimizer_enc_dec.state_dict(),
            'discrim-model': hidden_net.discriminator.state_dict(),
            'discrim-optim': hidden_net.optimizer_discrim.state_dict(),
            'epoch': epoch+1
        }
        utils_train.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
        if args.saveckp_freq and epoch % args.saveckp_freq == 0:
            utils_train.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))

        # Write JSON logs and tensorboard scalars on the main process only.
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch}
        log_stats_val = {**{f'val_{k}': v for k, v in val_stats.items()}, 'epoch': epoch}
        if utils_train.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
                f.write(json.dumps(log_stats_val) + "\n")
            args.writer.add_scalar('Loss/loss', train_stats['loss'], epoch)
            args.writer.add_scalar('Loss/loss_img', train_stats['encoder_mse'], epoch)
            args.writer.add_scalar('Loss/loss_msg', train_stats['dec_mse'], epoch)
            args.writer.add_scalar('Loss/loss_discr', train_stats['discr_cover_bce']+train_stats['discr_encod_bce'], epoch)
            args.writer.add_scalar('Loss/loss_val', val_stats['loss'], epoch)
            args.writer.add_scalar('BER/train', train_stats['bitwise-error'], epoch)
            args.writer.add_scalar('BER/val', val_stats['bitwise-error'], epoch)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def train_one_epoch(hidden_net, data_loader, epoch, args):
    """Run one training epoch over ``data_loader`` and return averaged stats.

    Returns:
        dict mapping each logged loss name to its global average for the epoch.
    """
    logger = utils_train.MetricLogger(delimiter=" ")
    header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
    batches = logger.log_every(data_loader, 10, header)
    for batch_idx, (images, _) in enumerate(batches):
        images = images.to(device, non_blocking=True)  # BxCxHxW
        messages = utils.generate_messages(images.size(0), args.num_bits)
        messages = messages.to(device).type(torch.float)  # BxK
        batch_losses, _ = hidden_net.train_on_batch([images, messages])
        torch.cuda.synchronize()
        for loss_name, loss_value in batch_losses.items():
            logger.update(**{loss_name: loss_value})
    logger.synchronize_between_processes()
    print(">>> Averaged train stats: ", logger)
    return {name: meter.global_avg for name, meter in logger.meters.items()}
def val_one_epoch(hidden_net, data_loader, epoch, args):
    """Run one validation epoch over ``data_loader`` and return averaged stats.

    Returns:
        dict mapping each logged loss name to its global average for the epoch.
    """
    logger = utils_train.MetricLogger(delimiter=" ")
    header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
    batches = logger.log_every(data_loader, 50, header)
    for batch_idx, (images, _) in enumerate(batches):
        images = images.to(device, non_blocking=True)  # BxCxHxW
        messages = utils.generate_messages(images.size(0), args.num_bits)
        messages = messages.to(device).type(torch.float)  # BxK
        batch_losses, _ = hidden_net.validate_on_batch([images, messages])
        torch.cuda.synchronize()
        for loss_name, loss_value in batch_losses.items():
            logger.update(**{loss_name: loss_value})
    logger.synchronize_between_processes()
    print(">>> Averaged val stats: ", logger)
    return {name: meter.global_avg for name, meter in logger.meters.items()}
if __name__ == '__main__':
    # Script entry point: build the CLI parser, parse arguments, then run
    # the HiDDeN training loop defined above.
    # generate parser / parse parameters
    parser = get_parser()
    params = parser.parse_args()
    # run experiment
    train(params)
| 42.25 | 126 | 0.68787 |
aceba098838f059d3e5dc9988104ed7a56b31c8d | 5,707 | py | Python | olympic/train.py | oscarknagg/olympic-pytorch | 34061c484356e0fb56abe4cba9d4f3f86fc3eb4e | [
"MIT"
] | 4 | 2018-12-27T07:08:01.000Z | 2020-08-15T14:48:49.000Z | olympic/train.py | oscarknagg/olympic-pytorch | 34061c484356e0fb56abe4cba9d4f3f86fc3eb4e | [
"MIT"
] | 9 | 2020-03-24T16:25:15.000Z | 2022-03-11T23:35:35.000Z | olympic/train.py | oscarknagg/olympic-pytorch | 34061c484356e0fb56abe4cba9d4f3f86fc3eb4e | [
"MIT"
] | null | null | null | import torch
from torch.optim import Optimizer
from torch.nn import Module
from torch.utils.data import DataLoader
from typing import Callable, List, Union
from olympic.callbacks import DefaultCallback, ProgressBarLogger, CallbackList, Callback
from olympic.metrics import NAMED_METRICS
def gradient_step(model: Module,
                  optimiser: Optimizer,
                  loss_fn: Callable,
                  x: torch.Tensor,
                  y: torch.Tensor,
                  epoch: int,
                  **kwargs):
    """Perform a single supervised optimisation step.

    Puts the model into training mode, clears stale gradients, runs a forward
    pass, backpropagates the loss and applies the optimiser update.

    Args:
        model: Model to be fitted
        optimiser: Optimiser to calculate gradient step from loss
        loss_fn: Loss function to calculate between predictions and outputs
        x: Input samples
        y: Input targets
        epoch: Current epoch number (unused here; kept for update_fn API symmetry)

    Returns:
        Tuple of (loss tensor, prediction tensor).
    """
    model.train()
    optimiser.zero_grad()
    predictions = model(x)
    batch_loss = loss_fn(predictions, y)
    batch_loss.backward()
    optimiser.step()
    return batch_loss, predictions
def batch_metrics(model: Module,
                  y_pred: torch.Tensor,
                  y: torch.Tensor,
                  metrics: List[Union[str, Callable]],
                  batch_logs: dict):
    """Calculate metrics for the current training batch.

    Args:
        model: Model being fit (switched to eval mode for metric calculation)
        y_pred: Predictions for a particular batch
        y: Labels for a particular batch
        metrics: Metric names (looked up in NAMED_METRICS) or metric callables;
            may be None, in which case no metrics are calculated
        batch_logs: Dictionary of logs for the current batch; metric results
            are added under the metric's name

    Returns:
        The updated ``batch_logs`` dictionary.
    """
    model.eval()
    # ``fit`` passes its raw ``metrics`` argument, which defaults to None;
    # guard against iterating over None.
    for m in (metrics or []):
        if isinstance(m, str):
            batch_logs[m] = NAMED_METRICS[m](y, y_pred)
        else:
            # Callable metric: record its result under the callable's name.
            # (Previously the whole log dict was replaced by the metric's
            # return value, discarding 'loss' and the other batch entries.)
            batch_logs[getattr(m, '__name__', str(m))] = m(y, y_pred)

    return batch_logs
def fit(model: Module, optimiser: Optimizer,
        loss_fn: Callable,
        epochs: int,
        dataloader: DataLoader,
        prepare_batch: Callable = lambda batch: batch,
        metrics: List[Union[str, Callable]] = None,
        callbacks: List[Callback] = None,
        verbose: bool = True,
        update_fn: Callable = gradient_step,
        update_fn_kwargs: dict = None) -> List[dict]:
    """Function to abstract away training loop.

    The benefit of this function is that allows training scripts to be much more readable and allows for easy re-use of
    common training functionality provided they are written as a subclass of olympic.Callback (following the
    Keras API).

    Args:
        model: Model to be fitted.
        optimiser: Optimiser to calculate gradient step from loss
        loss_fn: Loss function to calculate between predictions and outputs
        epochs: Number of epochs of fitting to be performed
        dataloader: `torch.DataLoader` instance to supply training data
        prepare_batch: Callable to perform any desired preprocessing on a batch
        metrics: Optional list of metrics to evaluate the model with. Each metric should either be a string
            corresponding to a function in ``olympic.metrics`` (`metrics`). These metrics will automatically be
            calculated over the training set. You must supply the ``Evaluate`` callback in order to calculate
            these metrics over another dataset (i.e. validation)
        callbacks: List of olympic.Callback instances. These implement additional functionality to incorporate
            into training such as logging metrics to csv, model checkpointing, learning rate scheduling etc...
            See olympic.callbacks (``callbacks``) for more.
        verbose: All print output is muted if this argument is `False`
        update_fn: Function for calculating gradients. Leave as default for simple supervised training on labelled
            batches. For more complex training procedures (meta-learning etc...) you will need to write your own
            ``update_fn``
        update_fn_kwargs: Keyword arguments to pass to ``update_fn``

    Returns:
        training_logs: List of dicts where each dict contains information on each epoch. This is equivalent to
            the data that will be written to CSV using the CSVLogger callback
    """
    # Avoid the mutable-default-argument pitfall: a shared dict default would
    # leak state between calls if an update_fn ever mutated its kwargs.
    if update_fn_kwargs is None:
        update_fn_kwargs = {}

    # Determine number of samples:
    num_batches = len(dataloader)
    batch_size = dataloader.batch_size

    callbacks = CallbackList([DefaultCallback(), ] + (callbacks or []) + [ProgressBarLogger(), ])
    callbacks.set_model(model)
    callbacks.set_params({
        'num_batches': num_batches,
        'batch_size': batch_size,
        'verbose': verbose,
        'metrics': (metrics or []),
        'prepare_batch': prepare_batch,
        'loss_fn': loss_fn,
        'optimiser': optimiser
    })

    if verbose:
        print('Begin training...')

    callbacks.on_train_begin()

    training_logs = []
    for epoch in range(1, epochs + 1):
        callbacks.on_epoch_begin(epoch)

        epoch_logs = {}
        for batch_index, batch in enumerate(dataloader):
            batch_logs = dict(batch=batch_index, size=(batch_size or 1))

            callbacks.on_batch_begin(batch_index, batch_logs)

            x, y = prepare_batch(batch)

            loss, y_pred = update_fn(model, optimiser, loss_fn, x, y, epoch, **update_fn_kwargs)
            batch_logs['loss'] = loss.item()

            # Loops through all metrics
            batch_logs = batch_metrics(model, y_pred, y, metrics, batch_logs)

            callbacks.on_batch_end(batch_index, batch_logs)

        # A callback (e.g. early stopping) may request a halt by setting
        # ``model.stop_training``; this ends the epoch loop.
        # NOTE(review): placement reconstructed from line order — the check
        # sits between the batch loop and the epoch-end callbacks; confirm
        # against upstream before relying on epoch-end side effects here.
        if hasattr(model, 'stop_training'):
            if model.stop_training:
                break

        # Run on epoch end
        epoch_logs = callbacks.on_epoch_end(epoch, epoch_logs)
        training_logs.append(epoch_logs)

    # Run on train end
    if verbose:
        print('Finished.')

    callbacks.on_train_end()

    return training_logs
| 36.350318 | 119 | 0.647976 |
aceba0a9dc07a40ed25d19ffebcfb347a9efa131 | 525 | py | Python | challenges/challenge18.py | stevenliu216/challenges | a8991fc3cc2309f8ef0ba6d189be001377153583 | [
"MIT"
] | null | null | null | challenges/challenge18.py | stevenliu216/challenges | a8991fc3cc2309f8ef0ba6d189be001377153583 | [
"MIT"
] | 14 | 2018-09-18T02:00:28.000Z | 2019-07-08T15:59:56.000Z | challenges/challenge18.py | stevenliu216/challenges | a8991fc3cc2309f8ef0ba6d189be001377153583 | [
"MIT"
def exceptions(a, b):
    """Integer-divide ``a`` by ``b``, printing and returning the result.

    Both operands are coerced with ``int()``. On a zero divisor or a
    non-numeric operand, an "Error Code" message is printed and returned
    instead of raising.
    """
    try:
        quotient = int(a) // int(b)
    except (ZeroDivisionError, ValueError) as e:
        print(f"Error Code: {e}")
        return f"Error Code: {e}"
    print(quotient)
    return quotient
if __name__ == "__main__":
    # Read the number of test cases from the first whitespace-separated
    # token of the prompt line.
    dividers = input("Enter the inputs to divide: ").split()
    t = int(dividers[0])
    ab_pairs = []
    # Collect t further lines, each expected to hold a dividend and divisor.
    for _ in range(t):
        ab_pairs.append(list(input().split()))
    print("\n")
    # Run the guarded division for every collected (a, b) pair.
    for i in range(t):
        exceptions(ab_pairs[i][0], ab_pairs[i][1])
aceba0c19a07044af96f8a54260dcc59dcc1203f | 21,719 | py | Python | src/pretalx/cfp/flow.py | chriswolfdesign/pretalx | fb6bcf090a5c92e55a79851d60dfc716309da557 | [
"Apache-2.0"
] | null | null | null | src/pretalx/cfp/flow.py | chriswolfdesign/pretalx | fb6bcf090a5c92e55a79851d60dfc716309da557 | [
"Apache-2.0"
] | null | null | null | src/pretalx/cfp/flow.py | chriswolfdesign/pretalx | fb6bcf090a5c92e55a79851d60dfc716309da557 | [
"Apache-2.0"
] | null | null | null | import copy
import json
import logging
from collections import OrderedDict
from contextlib import suppress
from pathlib import Path
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import UploadedFile
from django.db.models import Q
from django.forms import ValidationError
from django.http import HttpResponseNotAllowed
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.views.generic.base import TemplateResponseMixin
from i18nfield.strings import LazyI18nString
from i18nfield.utils import I18nJSONEncoder
from pretalx.cfp.signals import cfp_steps
from pretalx.common.exceptions import SendMailException
from pretalx.common.phrases import phrases
from pretalx.common.utils import language
from pretalx.person.forms import SpeakerProfileForm, UserForm
from pretalx.person.models import User
from pretalx.submission.forms import InfoForm, QuestionsForm
from pretalx.submission.models import QuestionTarget, SubmissionType, Track
def i18n_string(data, locales):
    """Coerce ``data`` into a LazyI18nString covering all ``locales``.

    Plain strings and dicts are normalised into a locale→text mapping;
    locales without a value are filled with the gettext translation of the
    English text.
    """
    if isinstance(data, LazyI18nString):
        return data
    data = copy.deepcopy(data)
    with language("en"):
        # Force evaluation of a lazy translation proxy while English is the
        # active language.
        if getattr(data, "_proxy____prepared", None):
            data = str(data)
        if isinstance(data, str):
            data = {"en": str(data)}
    if not isinstance(data, dict):
        data = {"en": ""}
    english = data.get("en", "")
    for locale in locales:
        if locale != "en" and not data.get(locale):
            # Fall back to the gettext translation of the English value.
            with language(locale):
                data[locale] = gettext(english)
    return LazyI18nString(data)
def cfp_session(request):
    """Return (creating if needed) the per-draft CfP session store.

    Data is bucketed by the ``tmpid`` URL kwarg so multiple draft
    submissions can coexist within one browser session.
    """
    session = request.session
    # Mark the session dirty so the backend persists our in-place edits.
    session.modified = True
    if not session.get("cfp"):
        session["cfp"] = {}
    bucket = session["cfp"]
    tmpid = request.resolver_match.kwargs["tmpid"]
    if tmpid not in bucket:
        bucket[tmpid] = {"data": {}, "initial": {}, "files": {}}
    return bucket[tmpid]
class BaseCfPStep:
    """One step of the CfP submission wizard.

    Steps form a doubly linked list via the ``_previous``/``_next``
    attributes wired up by the flow; the navigation helpers skip steps
    that are not applicable for the current request.
    """

    icon = "pencil"

    def __init__(self, event):
        self.event = event
        self.request = None

    @property
    def identifier(self):
        raise NotImplementedError()

    @property
    def label(self):
        raise NotImplementedError()

    @property
    def priority(self):
        # Lower values are shown earlier; subclasses override as needed.
        return 100

    def is_applicable(self, request):
        return True

    def is_completed(self, request, warn=False):
        raise NotImplementedError()

    @cached_property
    def cfp_session(self):
        return cfp_session(self.request)

    def get_next_applicable(self, request):
        candidate = getattr(self, "_next", None)
        if not candidate:
            return None
        if candidate.is_applicable(request):
            return candidate
        # Skip over inapplicable steps recursively.
        return candidate.get_next_applicable(request)

    def get_prev_applicable(self, request):
        candidate = getattr(self, "_previous", None)
        if not candidate:
            return None
        if candidate.is_applicable(request):
            return candidate
        return candidate.get_prev_applicable(request)

    def get_prev_url(self, request):
        previous_step = self.get_prev_applicable(request)
        return previous_step.get_step_url(request) if previous_step else None

    def get_next_url(self, request):
        following_step = self.get_next_applicable(request)
        return following_step.get_step_url(request) if following_step else None

    def get_step_url(self, request):
        kwargs = request.resolver_match.kwargs
        kwargs["step"] = self.identifier
        url = reverse("cfp:event.submit", kwargs=kwargs)
        if request.GET:
            url += f"?{request.GET.urlencode()}"
        return url

    def get(self, request):
        return HttpResponseNotAllowed([])

    def post(self, request):
        return HttpResponseNotAllowed([])

    def done(self, request):
        pass
class TemplateFlowStep(TemplateResponseMixin, BaseCfPStep):
    """CfP step that renders a Django template on GET."""

    template_name = "cfp/event/submission_step.html"

    def get_context_data(self, **kwargs):
        # Expose the step itself, navigation URLs, and the list of currently
        # applicable steps for the wizard's progress display.
        kwargs.setdefault("step", self)
        kwargs.setdefault("event", self.event)
        kwargs.setdefault("prev_url", self.get_prev_url(self.request))
        kwargs.setdefault("next_url", self.get_next_url(self.request))
        kwargs.setdefault(
            "cfp_flow",
            [
                step
                for step in self.event.cfp_flow.steps
                if step.is_applicable(self.request)
            ],
        )
        return kwargs

    def render(self, **kwargs):
        """Render the step's template with the assembled context."""
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)

    def get(self, request):
        self.request = request
        return self.render()

    @property
    def identifier(self):
        # Still abstract: concrete steps must provide their identifier.
        raise NotImplementedError()
class FormFlowStep(TemplateFlowStep):
    """Template step backed by a form whose data/files persist in the session.

    Subclasses set ``form_class``; submitted data is serialised into the
    CfP session and uploaded files are parked in ``file_storage`` until the
    whole flow completes.
    """

    form_class = None
    # Uploads are staged under MEDIA_ROOT until the submission is finalised.
    file_storage = FileSystemStorage(str(Path(settings.MEDIA_ROOT) / "cfp_uploads"))

    def get_form_initial(self):
        """Merge stored initial values with previously entered data."""
        initial_data = self.cfp_session.get("initial", {}).get(self.identifier, {})
        previous_data = self.cfp_session.get("data", {}).get(self.identifier, {})
        return copy.deepcopy({**initial_data, **previous_data})

    def get_form(self, from_storage=False):
        # On GET (or when explicitly validating stored data) rebuild the form
        # from the session; on POST use the submitted payload.
        if self.request.method == "GET" or from_storage:
            return self.form_class(
                data=self.get_form_initial() or None,
                initial=self.get_form_initial(),
                files=self.get_files(),
                **self.get_form_kwargs(),
            )
        return self.form_class(
            data=self.request.POST, files=self.request.FILES, **self.get_form_kwargs()
        )

    # NOTE(review): the base class declares ``warn=False`` on is_completed;
    # this override omits it — confirm no caller passes ``warn``.
    def is_completed(self, request):
        self.request = request
        return self.get_form(from_storage=True).is_valid()

    def get_context_data(self, **kwargs):
        result = super().get_context_data(**kwargs)
        result["form"] = self.get_form()
        return result

    def post(self, request):
        self.request = request
        form = self.get_form()
        if not form.is_valid():
            # Re-render the step with validation errors.
            return self.get(request)
        self.set_data(form.cleaned_data)
        self.set_files(form.files)
        next_url = self.get_next_url(request)
        return redirect(next_url) if next_url else None

    def set_data(self, data):
        """Store cleaned form data in the session in a JSON-safe form."""

        def serialize_value(value):
            # Files are persisted separately via set_files.
            if getattr(value, "file", None):
                return None
            # Model instances are stored by primary key.
            if getattr(value, "pk", None):
                return value.pk
            if getattr(value, "__iter__", None):
                return [serialize_value(v) for v in value]
            if getattr(value, "serialize", None):
                return value.serialize()
            return str(value)

        self.cfp_session["data"][self.identifier] = json.loads(
            json.dumps(data, default=serialize_value)
        )

    def get_files(self):
        """Re-open files previously staged for this step, or None."""
        saved_files = self.cfp_session["files"].get(self.identifier, {})
        files = {}
        for field, field_dict in saved_files.items():
            field_dict = field_dict.copy()
            tmp_name = field_dict.pop("tmp_name")
            files[field] = UploadedFile(
                file=self.file_storage.open(tmp_name), **field_dict
            )
        return files or None

    def set_files(self, files):
        """Stage uploaded files and record their metadata in the session."""
        for field, field_file in files.items():
            tmp_filename = self.file_storage.save(field_file.name, field_file)
            file_dict = {
                "tmp_name": tmp_filename,
                "name": field_file.name,
                "content_type": field_file.content_type,
                "size": field_file.size,
                "charset": field_file.charset,
            }
            data = self.cfp_session["files"].get(self.identifier, {})
            data[field] = file_dict
            self.cfp_session["files"][self.identifier] = data
class GenericFlowStep:
    """Mixin adding user-configurable title/text (from the CfP editor) to a step."""

    @cached_property
    def config(self):
        # Per-step configuration as stored in the event's CfP flow settings.
        all_steps = self.event.cfp_flow.config.get("steps", {})
        return all_steps.get(self.identifier, {})

    @property
    def title(self):
        configured = self.config.get("title", self._title)
        return i18n_string(configured, self.event.locales)

    @property
    def text(self):
        configured = self.config.get("text", self._text)
        return i18n_string(configured, self.event.locales)

    def get_form_kwargs(self):
        return {
            "event": self.request.event,
            "field_configuration": self.config.get("fields"),
        }

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["text"] = self.text
        context["title"] = self.title
        return context
class InfoStep(GenericFlowStep, FormFlowStep):
    """First wizard step: core submission data; creates the Submission on done."""

    identifier = "info"
    icon = "paper-plane"
    form_class = InfoForm
    priority = 0

    @property
    def label(self):
        return _("General")

    @property
    def _title(self):
        return _("Hey, nice to meet you!")

    @property
    def _text(self):
        return _(
            "We're glad that you want to contribute to our event with your submission. Let's get started, this won't take long."
        )

    def get_form_kwargs(self):
        result = super().get_form_kwargs()
        # Pass through an access code attached to the request, if any.
        result["access_code"] = getattr(self.request, "access_code", None)
        return result

    def get_form_initial(self):
        result = super().get_form_initial()
        # Allow preselecting track/submission type via ?track=<pk>-... URLs.
        for field, model in (("submission_type", SubmissionType), ("track", Track)):
            request_value = self.request.GET.get(field)
            if request_value:
                with suppress(AttributeError, TypeError):
                    pk = int(request_value.split("-")[0])
                    obj = model.objects.filter(event=self.request.event, pk=pk).first()
                    if obj:
                        result[field] = obj
        return result

    def done(self, request):
        """Create the submission, attach the speaker, and handle invites/codes."""
        self.request = request
        form = self.get_form(from_storage=True)
        form.instance.event = self.event
        form.save()
        submission = form.instance
        submission.speakers.add(request.user)
        submission.log_action("pretalx.submission.create", person=request.user)
        messages.success(request, phrases.cfp.submission_success)
        additional_speaker = form.cleaned_data.get("additional_speaker").strip()
        if additional_speaker:
            try:
                submission.send_invite(to=[additional_speaker], _from=request.user)
            except SendMailException as exception:
                # A failed invite mail must not abort the submission itself.
                logging.getLogger("").warning(str(exception))
                messages.warning(self.request, phrases.cfp.submission_email_fail)

        access_code = getattr(request, "access_code", None)
        if access_code:
            submission.access_code = access_code
            submission.save()
            access_code.redeemed += 1
            access_code.save()

        # Later steps (e.g. questions) read the new submission off the request.
        request.submission = submission
class QuestionsStep(GenericFlowStep, FormFlowStep):
    """Wizard step collecting answers to the event's custom questions."""

    identifier = "questions"
    icon = "question-circle-o"
    form_class = QuestionsForm
    template_name = "cfp/event/submission_questions.html"
    priority = 25

    @property
    def label(self):
        return _("Questions")

    @property
    def _title(self):
        return _("Tell us more!")

    @property
    def _text(self):
        return _(
            "Before we can save your submission, we have some more questions for you."
        )

    def is_applicable(self, request):
        """Show this step only if any question applies to the chosen track/type."""
        self.request = request
        info_data = self.cfp_session.get("data", {}).get("info", {})
        if not info_data or not info_data.get("track"):
            return self.event.questions.all().exists()
        # Exclude submission questions restricted to other tracks or types.
        return self.event.questions.exclude(
            Q(target=QuestionTarget.SUBMISSION)
            & (
                (~Q(tracks__in=[info_data.get("track")]) & Q(tracks__isnull=False))
                | (
                    ~Q(submission_types__in=[info_data.get("submission_type")])
                    & Q(submission_types__isnull=False)
                )
            )
        ).exists()

    def get_form_kwargs(self):
        result = super().get_form_kwargs()
        info_data = self.cfp_session.get("data", {}).get("info", {})
        result["target"] = ""
        result["track"] = info_data.get("track")
        result["submission_type"] = info_data.get("submission_type")
        if not self.request.user.is_anonymous:
            result["speaker"] = self.request.user
        return result

    def done(self, request):
        # Persist the answers against the submission created by InfoStep.
        form = self.get_form(from_storage=True)
        form.speaker = request.user
        form.submission = request.submission
        form.is_valid()
        form.save()
class UserStep(GenericFlowStep, FormFlowStep):
    """Wizard step that registers or logs in the submitting user."""

    identifier = "user"
    icon = "user-circle-o"
    form_class = UserForm
    template_name = "cfp/event/submission_user.html"
    priority = 49

    @property
    def label(self):
        return _("Account")

    @property
    def _title(self):
        return _(
            "That's it about your submission! We now just need a way to contact you."
        )

    @property
    def _text(self):
        return _(
            "To create your submission, you need an account on this page. This not only gives us a way to contact you, it also gives you the possibility to edit your submission or to view its current state."
        )

    def is_applicable(self, request):
        # Skip the account step entirely for already-authenticated users.
        return not request.user.is_authenticated

    def done(self, request):
        """Create/fetch the user from the stored form and log them in."""
        if not getattr(request.user, "is_authenticated", False):
            form = self.get_form(from_storage=True)
            form.is_valid()
            uid = form.save()

            request.user = User.objects.filter(pk=uid).first()

        if not request.user or not request.user.is_active:
            raise ValidationError(
                _(
                    "There was an error when logging in. Please contact the organiser for further help."
                ),
            )

        login(
            request, request.user, backend="django.contrib.auth.backends.ModelBackend"
        )
class ProfileStep(GenericFlowStep, FormFlowStep):
    """Wizard step collecting the speaker's public profile."""

    identifier = "profile"
    icon = "address-card-o"
    form_class = SpeakerProfileForm
    template_name = "cfp/event/submission_profile.html"
    priority = 75

    @property
    def label(self):
        return _("Profile")

    @property
    def _title(self):
        return _("Tell us something about yourself!")

    @property
    def _text(self):
        return _(
            "This information will be publicly displayed next to your talk - you can always edit for as long as submissions are still open."
        )

    def get_form_kwargs(self):
        result = super().get_form_kwargs()
        # Prefer the user created in the UserStep, falling back to the
        # currently authenticated user.
        user_data = copy.deepcopy(self.cfp_session.get("data", {}).get("user", {}))
        if user_data and user_data.get("user_id"):
            result["user"] = User.objects.filter(pk=user_data["user_id"]).first()
        if not result.get("user") and self.request.user.is_authenticated:
            result["user"] = self.request.user
        user = result.get("user")
        result["name"] = user.name if user else user_data.get("register_name")
        result["read_only"] = False
        result["essential_only"] = True
        return result

    def get_context_data(self, **kwargs):
        result = super().get_context_data(**kwargs)
        # Show a gravatar preview based on the (possibly not-yet-saved) email.
        email = getattr(self.request.user, "email", None)
        if email is None:
            data = self.cfp_session.get("data", {}).get("user", {})
            email = data.get("register_email", "")
        if email:
            result["gravatar_parameter"] = User(email=email).gravatar_parameter
        return result

    def done(self, request):
        form = self.get_form(from_storage=True)
        form.is_valid()
        form.user = request.user
        form.save()
# Built-in wizard steps; actual display order is determined at runtime by
# each step's ``priority`` attribute when CfPFlow sorts them.
DEFAULT_STEPS = (
    InfoStep,
    QuestionsStep,
    UserStep,
    ProfileStep,
)
class CfPFlow:
    """An event's CfPFlow contains the list of CfP steps.

    The ``event`` attribute contains the related event and is the only one required
    for instantiation.
    The ``steps`` attribute contains a (linked) list of BaseCfPStep instances.
    The ``steps_dict`` attribute contains an OrderedDict of the same steps.
    The ``config`` attribute contains the additional user configuration, primarily
    from the CfP editor.

    When instantiated with a request during submission time, it will only show
    the forms relevant to the current request. When instantiated without a
    request, for the CfP editor, it will contain all steps.
    """

    event = None

    def __init__(self, event):
        self.event = event
        data = event.settings.cfp_flow
        self.config = self.get_config(data)

        # Built-in steps plus any contributed via the cfp_steps plugin signal,
        # sorted by priority.
        steps = [step(event=event) for step in DEFAULT_STEPS]
        for __, response in cfp_steps.send_robust(self.event):
            for step_class in response:
                steps.append(step_class(event=event))
        steps = sorted(steps, key=lambda step: step.priority)
        self.steps_dict = OrderedDict()
        for step in steps:
            self.steps_dict[step.identifier] = step

        # Wire the steps into a doubly linked list for navigation.
        previous_step = None
        for step in steps:
            step._previous = previous_step
            if previous_step:
                previous_step._next = step
            previous_step = step

    @property
    def steps(self):
        return list(self.steps_dict.values())

    def get_config(self, data, json_compat=False):
        """Normalise raw settings/editor data into {'steps': {id: config}}."""
        if isinstance(data, str) and data:
            data = json.loads(data)
        if not isinstance(data, dict):
            return {}

        config = {"steps": {}}
        steps = data.get("steps", {})
        if isinstance(steps, list):  # This is what we get from the editor
            for entry in steps:
                config["steps"][entry["identifier"]] = self._get_step_config_from_data(
                    entry
                )
        else:
            for key, value in steps.items():
                config["steps"][key] = self._get_step_config_from_data(value)
        if json_compat:
            # Round-trip through JSON to collapse lazy i18n strings.
            config = json.loads(json.dumps(config, cls=I18nJSONEncoder))
        return config

    def get_editor_config(self, json_compat=False):
        """Build the step/field structure consumed by the CfP editor UI."""
        config = self.config
        locales = self.event.locales
        steps = []
        for step in self.steps:
            step_config = config.get("steps", {}).get(step.identifier, {})
            # Only generic steps are editable; the account step is excluded.
            if not isinstance(step, GenericFlowStep) or step.identifier == "user":
                continue
            steps.append(
                {
                    "icon": step.icon,
                    "icon_label": step.label,
                    "title": step_config.get("title", step.title),
                    "text": step_config.get("text", step.text),
                    "identifier": step.identifier,
                    "fields": [
                        {
                            "widget": field.widget.__class__.__name__,
                            "key": key,
                            "label": i18n_string(field.label, locales),
                            "help_text": i18n_string(
                                getattr(field, "original_help_text", field.help_text),
                                locales,
                            ),
                            "added_help_text": i18n_string(
                                getattr(field, "added_help_text", ""), locales,
                            ),
                            "full_help_text": field.help_text,
                            "required": field.required,
                        }
                        for key, field in step.form_class(
                            event=self.event,
                            field_configuration=step_config.get("fields"),
                        ).fields.items()
                    ],
                }
            )
        if json_compat:
            steps = json.loads(json.dumps(steps, cls=I18nJSONEncoder))
        return steps

    def _get_step_config_from_data(self, data):
        """Extract the recognised per-step keys from one editor step entry."""
        step_config = {}
        locales = self.event.locales
        for i18n_configurable in ("title", "text", "label"):
            if i18n_configurable in data:
                step_config[i18n_configurable] = i18n_string(
                    data[i18n_configurable], locales
                )
        for configurable in ("icon",):
            if configurable in data:
                step_config[configurable] = data[configurable]
        step_config["fields"] = []
        for config_field in data.get("fields", []):
            field = {}
            for key in ("help_text", "request", "required", "key"):
                if key in config_field:
                    # Only help_text is localisable; the rest pass through.
                    field[key] = (
                        i18n_string(config_field[key], locales)
                        if key == "help_text"
                        else config_field[key]
                    )
            step_config["fields"].append(field)
        return step_config

    def get_config_json(self):
        return json.dumps(self.config, cls=I18nJSONEncoder)

    def save_config(self, data):
        """Normalise and persist ``data`` to the event settings."""
        if isinstance(data, list) or (isinstance(data, dict) and "steps" not in data):
            data = {"steps": data}
        data = self.get_config(data, json_compat=True)
        self.event.settings.cfp_flow = data

    def reset(self):
        # Dropping the stored config falls back to the built-in defaults.
        self.save_config(None)
aceba0d01ebf83d24767290d6a144274794955f0 | 4,860 | py | Python | setup.py | Hidden-black/jishaku | d3f50749b5a977b544e5fd14894585f656247486 | [
"MIT"
] | 434 | 2018-01-04T05:57:46.000Z | 2022-03-29T12:52:54.000Z | setup.py | Hidden-black/jishaku | d3f50749b5a977b544e5fd14894585f656247486 | [
"MIT"
] | 134 | 2018-02-16T05:12:13.000Z | 2022-03-31T15:05:23.000Z | setup.py | Hidden-black/jishaku | d3f50749b5a977b544e5fd14894585f656247486 | [
"MIT"
] | 228 | 2017-12-18T18:02:17.000Z | 2022-03-29T23:25:41.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 Devon (Gorialis) R
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pathlib
import re
import subprocess
from setuptools import setup
# Locate the project root relative to this setup.py.
ROOT = pathlib.Path(__file__).parent

# Extract the base version from jishaku/meta.py's VersionInfo declaration.
with open(ROOT / 'jishaku' / 'meta.py', 'r', encoding='utf-8') as f:
    VERSION_MATCH = re.search(r'VersionInfo\(major=(\d+), minor=(\d+), micro=(\d+), .+\)', f.read(), re.MULTILINE)

    if not VERSION_MATCH:
        raise RuntimeError('version is not set or could not be located')

    VERSION = '.'.join([VERSION_MATCH.group(1), VERSION_MATCH.group(2), VERSION_MATCH.group(3)])

# Each requirements/<name>.txt becomes an extras_require entry; the special
# '_' file holds the core install requirements.
EXTRA_REQUIRES = {}

for feature in (ROOT / 'requirements').glob('*.txt'):
    with open(feature, 'r', encoding='utf-8') as f:
        EXTRA_REQUIRES[feature.with_suffix('').name] = f.read().splitlines()

REQUIREMENTS = EXTRA_REQUIRES.pop('_')

if not VERSION:
    raise RuntimeError('version is not set')

# Append git metadata (commit count, short hash, branch) to the version for
# non-release builds. FileNotFoundError means git isn't installed: skip.
try:
    PROCESS = subprocess.Popen(
        ['git', 'rev-list', '--count', 'HEAD'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    COMMIT_COUNT, ERR = PROCESS.communicate()

    if COMMIT_COUNT:
        PROCESS = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        COMMIT_HASH, ERR = PROCESS.communicate()

        if COMMIT_HASH:
            # NOTE(review): the dots in this pattern are unescaped and so
            # match any character — presumably intended as literal '.'.
            match = re.match(r'(\d).(\d).(\d)(a|b|rc)?', os.getenv('tag_name') or "")

            # Tag missing or pre-release tag: mark build as a local/dev version.
            if (match and match[4]) or not match:
                VERSION += ('' if match else 'a') + COMMIT_COUNT.decode('utf-8').strip() + '+g' + COMMIT_HASH.decode('utf-8').strip()

                # Also attempt to retrieve a branch, when applicable
                PROCESS = subprocess.Popen(
                    ['git', 'symbolic-ref', '-q', '--short', 'HEAD'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE
                )
                COMMIT_BRANCH, ERR = PROCESS.communicate()

                if COMMIT_BRANCH:
                    VERSION += "." + re.sub('[^a-zA-Z0-9.]', '.', COMMIT_BRANCH.decode('utf-8').strip())
except FileNotFoundError:
    pass

# README.md becomes the PyPI long description.
with open(ROOT / 'README.md', 'r', encoding='utf-8') as f:
    README = f.read()

setup(
    name='jishaku',
    author='Devon (Gorialis) R',
    url='https://github.com/Gorialis/jishaku',
    license='MIT',
    description='A discord.py extension including useful tools for bot development and debugging.',
    long_description=README,
    long_description_content_type='text/markdown',
    project_urls={
        'Documentation': 'https://jishaku.readthedocs.io/en/latest/',
        'Code': 'https://github.com/Gorialis/jishaku',
        'Issue tracker': 'https://github.com/Gorialis/jishaku/issues'
    },
    version=VERSION,
    packages=['jishaku', 'jishaku.features', 'jishaku.repl', 'jishaku.shim'],
    include_package_data=True,
    install_requires=REQUIREMENTS,
    python_requires='>=3.8.0',
    extras_require=EXTRA_REQUIRES,
    download_url='https://github.com/Gorialis/jishaku/archive/{}.tar.gz'.format(VERSION),
    keywords='jishaku discord.py discord cog repl extension',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: AsyncIO',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Communications :: Chat',
        'Topic :: Internet',
        'Topic :: Software Development :: Debuggers',
        'Topic :: Software Development :: Testing',
        'Topic :: Utilities'
    ]
)
| 33.75 | 133 | 0.643827 |
aceba162394706e033c6c0919922d9c680b0427b | 855 | py | Python | src/sentry/rules/actions/services.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/rules/actions/services.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/rules/actions/services.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
class PluginService(object):
    """Adapter exposing a plugin object to the rules engine as a service."""

    def __init__(self, obj):
        self.service = obj

    @property
    def slug(self):
        return self.service.slug

    @property
    def title(self):
        return self.service.get_title()

    @property
    def service_type(self):
        return "plugin"


class LegacyPluginService(PluginService):
    """PluginService variant for legacy plugins.

    The redundant ``__init__`` override (which re-assigned ``self.service``
    after ``super().__init__`` had already done so) has been removed; the
    inherited constructor is sufficient.
    """

    @property
    def service_type(self):
        return "legacy_plugin"


class SentryAppService(PluginService):
    """PluginService variant for Sentry App integrations.

    Uses the app's ``name`` attribute as its title instead of ``get_title()``.
    The redundant ``__init__`` override has been removed (see above).
    """

    @property
    def title(self):
        return self.service.name

    @property
    def service_type(self):
        return "sentry_app"
| 19.883721 | 54 | 0.650292 |
aceba163391c926afd4cba484e0cb2485fa8712e | 4,600 | py | Python | hackq_trivia/searcher.py | Exaphis/HackQ-Trivia | e42314e63295d53018471514a46ea7febea6db19 | [
"MIT"
] | 120 | 2018-02-16T02:08:20.000Z | 2021-11-16T12:14:43.000Z | hackq_trivia/searcher.py | Exaphis/HackQ-Trivia | e42314e63295d53018471514a46ea7febea6db19 | [
"MIT"
] | 195 | 2018-02-16T20:13:10.000Z | 2021-04-06T18:57:28.000Z | hackq_trivia/searcher.py | Exaphis/HackQ-Trivia | e42314e63295d53018471514a46ea7febea6db19 | [
"MIT"
] | 80 | 2018-02-16T22:01:44.000Z | 2021-07-29T14:40:10.000Z | import asyncio
import logging
from html import unescape
from typing import Iterable, List
import aiohttp
import bs4
from anyascii import anyascii
from hackq_trivia.config import config
class InvalidSearchServiceError(Exception):
    """Raised when the search service specified in config is not recognized (expected "Bing" or "Google")."""
class Searcher:
    """Asynchronous web searcher supporting Bing Web Search and Google Custom Search.

    Also provides helpers to fetch pages concurrently and to strip HTML
    down to visible lowercase ASCII text.

    Fix vs. original: the non-200 error paths in ``get_google_links`` and
    ``get_bing_links`` called the root-level ``logging.error`` instead of
    ``self.logger``, bypassing this module's logger configuration; they now
    use the instance logger consistently.
    """

    HEADERS = {"User-Agent": "HQbot"}
    BING_ENDPOINT = "https://api.bing.microsoft.com/v7.0/search"
    GOOGLE_ENDPOINT = "https://www.googleapis.com/customsearch/v1"

    def __init__(self):
        self.timeout = config.getfloat("CONNECTION", "Timeout")
        self.search_service = config.get("SEARCH", "Service")

        bing_api_key = config.get("SEARCH", "BingApiKey")
        self.bing_headers = {"Ocp-Apim-Subscription-Key": bing_api_key}

        self.google_cse_id = config.get("SEARCH", "GoogleCseId")
        self.google_api_key = config.get("SEARCH", "GoogleApiKey")

        # don't use default headers for Bing search so searcher tests
        # can run get_bing_links/get_google_links on its own
        # without depending on search_service being set correctly
        self.search_session = aiohttp.ClientSession()

        if self.search_service == "Bing":
            self.search_func = self.get_bing_links
        elif self.search_service == "Google":
            self.search_func = self.get_google_links
        else:
            raise InvalidSearchServiceError(
                f"Search service type {self.search_service} was not recognized."
            )

        client_timeout = aiohttp.ClientTimeout(total=self.timeout)
        self.fetch_session = aiohttp.ClientSession(
            headers=Searcher.HEADERS, timeout=client_timeout
        )

        self.logger = logging.getLogger(__name__)

    async def close(self) -> None:
        """Close both aiohttp sessions; must be awaited before shutdown."""
        await self.fetch_session.close()
        await self.search_session.close()

    async def fetch(self, url: str) -> str:
        """Fetch ``url`` and return the response body, or "" on any error."""
        try:
            async with self.fetch_session.get(url, timeout=self.timeout) as response:
                return await response.text()
        except asyncio.TimeoutError:
            self.logger.error(f"Server timeout to {url}")
        except Exception as e:
            self.logger.error(f"Server error to {url}")
            self.logger.error(e)

        return ""

    # no typing info for return value because https://github.com/python/typeshed/issues/2652
    async def fetch_multiple(self, urls: Iterable[str]):
        """Fetch all ``urls`` concurrently; bodies are returned in input order."""
        coroutines = [self.fetch(url) for url in urls]
        responses = await asyncio.gather(*coroutines)
        return responses

    async def get_search_links(self, query: str, num_results: int) -> List[str]:
        """Search using the provider chosen in config and return result URLs."""
        return await self.search_func(query, num_results)

    async def get_google_links(self, query: str, num_results: int) -> List[str]:
        """Query Google Custom Search; return result URLs, or [] on failure."""
        search_params = {
            "key": self.google_api_key,
            "cx": self.google_cse_id,
            "q": query,
            "num": num_results,
        }
        async with self.search_session.get(
            self.GOOGLE_ENDPOINT, params=search_params
        ) as resp:
            resp_status = resp.status
            resp_data = await resp.json()

        if resp_status != 200:
            # use the instance logger (was module-level logging.error)
            self.logger.error(f"Google search failed with status code {resp_status}")
            self.logger.error(resp_data)
            return []

        self.logger.debug(f"google: {query}, n={num_results}")
        self.logger.debug(resp_data)
        return [item["link"] for item in resp_data["items"]]

    async def get_bing_links(self, query: str, num_results: int) -> List[str]:
        """Query the Bing Web Search API; return result URLs, or [] on failure."""
        # why does Bing consistently deliver 1 fewer result than requested?
        search_params = {"q": query, "count": num_results + 1}
        async with self.search_session.get(
            self.BING_ENDPOINT, params=search_params, headers=self.bing_headers
        ) as resp:
            resp_status = resp.status
            resp_data = await resp.json()

        if resp_status != 200:
            # use the instance logger (was module-level logging.error)
            self.logger.error(f"Bing search failed with status code {resp_status}")
            self.logger.error(resp_data)
            return []

        self.logger.debug(f"bing: {query}, n={num_results}")
        self.logger.debug(resp_data)
        return [item["url"] for item in resp_data["webPages"]["value"]]

    @staticmethod
    def html_to_visible_text(html):
        """Strip scripts/styles/markup from ``html`` and return lowercase ASCII text."""
        soup = bs4.BeautifulSoup(html, features="html.parser")
        for s in soup(["style", "script", "[document]", "head", "title"]):
            s.extract()

        return anyascii(unescape(soup.get_text())).lower()
aceba1812573135036abbbb1f861f82fa324f947 | 284 | py | Python | uploadPassword.py | TheLion/3d-print-monitor-moonraker | ee58808dbd8cab9f4771e6f943f2830da9505dfd | [
"MIT"
] | 14 | 2019-10-10T03:10:40.000Z | 2022-02-08T17:30:15.000Z | uploadPassword.py | TheLion/3d-print-monitor-moonraker | ee58808dbd8cab9f4771e6f943f2830da9505dfd | [
"MIT"
] | 11 | 2019-10-12T06:07:30.000Z | 2020-11-21T23:19:06.000Z | uploadPassword.py | TheLion/3d-print-monitor-moonraker | ee58808dbd8cab9f4771e6f943f2830da9505dfd | [
"MIT"
] | 3 | 2019-10-19T21:42:59.000Z | 2022-02-08T17:32:44.000Z | Import("env")
# PlatformIO extra script body: reads the OTA upload password from an ini
# file and appends it to the uploader flags. The ``env`` object used below
# is presumably injected by PlatformIO's ``Import("env")`` — TODO confirm
# against the project's platformio.ini extra_scripts setup.
try:
    import configparser
except ImportError:
    # Python 2 fallback: the module was named ConfigParser.
    import ConfigParser as configparser

# uploadPassword.ini must contain a [password] section with an
# ``upload_password`` key; config.get raises if either is missing.
config = configparser.ConfigParser()
config.read("uploadPassword.ini")
password = config.get("password", "upload_password")

# Pass the password to the upload tool via its --auth flag.
env.Replace(
    UPLOAD_FLAGS="--auth=" + password
)
| 17.75 | 52 | 0.739437 |
aceba1cf0ee07b0207f31475e116815b6d10b5ab | 2,037 | py | Python | pl_bolts/utils/__init__.py | Aayush-Jain01/lightning-bolts | 8ab2f3ea45bf1d3728007dae54a7f11ef5fe4a39 | [
"Apache-2.0"
] | 504 | 2020-03-25T16:03:32.000Z | 2022-03-31T20:45:25.000Z | pl_bolts/utils/__init__.py | Aayush-Jain01/lightning-bolts | 8ab2f3ea45bf1d3728007dae54a7f11ef5fe4a39 | [
"Apache-2.0"
] | 250 | 2020-03-30T04:15:33.000Z | 2022-03-29T12:08:52.000Z | pl_bolts/utils/__init__.py | Aayush-Jain01/lightning-bolts | 8ab2f3ea45bf1d3728007dae54a7f11ef5fe4a39 | [
"Apache-2.0"
] | 134 | 2021-03-10T01:10:53.000Z | 2022-03-31T11:15:19.000Z | import importlib
import operator
from typing import Callable
import torch
from packaging.version import Version
from pkg_resources import DistributionNotFound
from pytorch_lightning.utilities import _module_available
from pl_bolts.callbacks.verification.batch_gradient import BatchGradientVerification # type: ignore
# Ported from https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/utilities/imports.py
def _compare_version(package: str, op: Callable, version: str) -> bool:
    """Compare an installed package's version against a requirement.

    >>> _compare_version("torch", operator.ge, "0.1")
    True
    """
    try:
        module = importlib.import_module(package)
    except (ModuleNotFoundError, DistributionNotFound):
        # Package cannot be imported at all.
        return False
    try:
        installed = Version(module.__version__)
    except TypeError:
        # ``__version__`` is mocked by sphinx, so report True to generate all summaries.
        return True
    return op(installed, Version(version))
# Availability flags for optional dependencies, evaluated once at import time.
_NATIVE_AMP_AVAILABLE: bool = _module_available("torch.cuda.amp") and hasattr(torch.cuda.amp, "autocast")

_TORCHVISION_AVAILABLE: bool = _module_available("torchvision")
_GYM_AVAILABLE: bool = _module_available("gym")
_SKLEARN_AVAILABLE: bool = _module_available("sklearn")
_PIL_AVAILABLE: bool = _module_available("PIL")
_OPENCV_AVAILABLE: bool = _module_available("cv2")
_WANDB_AVAILABLE: bool = _module_available("wandb")
_MATPLOTLIB_AVAILABLE: bool = _module_available("matplotlib")

# Version gates used to paper over API differences between releases.
_TORCHVISION_LESS_THAN_0_9_1: bool = _compare_version("torchvision", operator.lt, "0.9.1")
_PL_GREATER_EQUAL_1_4 = _compare_version("pytorch_lightning", operator.ge, "1.4.0")
_PL_GREATER_EQUAL_1_4_5 = _compare_version("pytorch_lightning", operator.ge, "1.4.5")
_TORCH_ORT_AVAILABLE = _module_available("torch_ort")
# SparseML requires torch < 1.10 and pytorch_lightning >= 1.4.5.
_TORCH_MAX_VERSION_SPARSEML = _compare_version("torch", operator.lt, "1.10.0")
_SPARSEML_AVAILABLE = _module_available("sparseml") and _PL_GREATER_EQUAL_1_4_5 and _TORCH_MAX_VERSION_SPARSEML

# Explicit public API of this module.
__all__ = ["BatchGradientVerification"]
aceba5ed8f550324d7e8648f7cbf6d4b53173ecb | 161,340 | py | Python | src/azure-cli/azure/cli/command_modules/acs/custom.py | wanlwanl/azure-cli | 3d89040f4f6e64784f66ed3ea9290530bd5c57b6 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/custom.py | wanlwanl/azure-cli | 3d89040f4f6e64784f66ed3ea9290530bd5c57b6 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/custom.py | wanlwanl/azure-cli | 3d89040f4f6e64784f66ed3ea9290530bd5c57b6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_09_01.models import AgentPool
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_managed_clusters
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
# Module-level logger shared by every command implementation in this file.
logger = get_logger(__name__)

# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Locate *binary* on the PATH and return its full path, or None.

    On Windows the ``.exe`` suffix is appended before searching, and PATH
    entries are split on ';' rather than ':'.
    """
    search_path = os.getenv('PATH')
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        directories = search_path.split(';')
    else:
        directories = search_path.split(':')

    for directory in directories:
        candidate = os.path.join(directory, binary)
        runnable = (os.path.exists(candidate) and
                    os.path.isfile(candidate) and
                    os.access(candidate, os.X_OK))
        if runnable:
            return candidate
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    # Poll the URL until it answers (or we give up), sleeping between tries.
    # Fix vs. original: the unconditional `break` used to sit at loop-body
    # level, so the loop exited after the first attempt and never retried.
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # Fix vs. original: args was the set literal ({url}), which worked only
    # because Thread unpacks any iterable; a one-element tuple is the
    # documented form for Thread's args parameter.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """Open the web UI of an ACS cluster's orchestrator in a browser.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch the browse command to the handler matching the cluster orchestrator."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    normalized = str(orchestrator_type).lower()

    is_kubernetes = (normalized == 'kubernetes' or
                     orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
                     (acs_info.custom_profile and
                      acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)

    if normalized == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)

    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """Launch a local proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, run ``kubectl proxy`` and open the dashboard."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a fresh kubeconfig file each time.
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)

    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user interrupts the proxy.
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.

    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH into a DC/OS master, start the octarine proxy there, tunnel a local
    port to it and (optionally) open the dashboard in a browser.

    Always disables the local HTTP proxy again when the tunnel closes.
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))

    # octarine is DC/OS's bundled HTTP proxy; it must exist on the master.
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))

    # Start a proxy instance on the remote host under a random id.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)

    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()

    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always restore the proxy settings, even if tunneling fails.
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member

    cli_kwargs = {'install_location': install_location}
    if client_version:
        cli_kwargs['client_version'] = client_version

    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(**cli_kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(**cli_kwargs)
    raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Build an SSL context, working around old Pythons and Cloud Shell on Windows."""
    needs_legacy_context = (sys.version_info < (3, 4) or
                            (in_cloud_console() and platform.system() == 'Windows'))
    if needs_legacy_context:
        try:
            # PROTOCOL_TLS was added in python 2.7.13 and 3.6
            return ssl.SSLContext(ssl.PROTOCOL_TLS)
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)

    return ssl.create_default_context()
def _urlretrieve(url, filename):
    """Download *url* and write the raw response body to *filename*."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as out_file:
        out_file.write(response.read())
def _unzip(src, dest):
    """Extract the zip archive *src* into directory *dest*."""
    logger.debug('Extracting %s to %s.', src, dest)
    system = platform.system()
    if system not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    import zipfile
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere and marks it executable.

    :param install_location: Full path (directory + filename) to write the binary to.
    :param client_version: DC/OS CLI version to download (default '1.8').
    :raises CLIError: if no install location is given, the platform is
        unsupported, or the download fails.
    """
    system = platform.system()

    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system))

    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Add execute permission for user/group/other on top of existing mode bits.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None,
                    kubelogin_version='latest', kubelogin_install_location=None):
    """Install both kubectl and kubelogin at the requested versions and locations."""
    k8s_install_kubectl(cmd, client_version, install_location)
    k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    Resolves 'latest' to the current stable release, downloads the binary for
    the current platform, marks it executable and warns if the install
    directory is not on PATH.
    """
    source_url = "https://storage.googleapis.com/kubernetes-release/release"
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        # Use the Azure China mirror when operating in that cloud.
        source_url = 'https://mirror.azure.cn/kubernetes/kubectl'

    if client_version == 'latest':
        # stable.txt holds the current stable version tag, e.g. "v1.20.0".
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version

    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system))

    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Add execute permission for user/group/other on top of existing mode bits.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))

    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    Resolves 'latest' via the GitHub releases API (or the Azure China
    mirror), downloads the release zip, extracts the binary for the current
    platform, installs it and warns if the install directory is not on PATH.
    """
    source_url = 'https://github.com/Azure/kubelogin/releases/download'
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'

    if client_version == 'latest':
        # Query the release metadata for the latest tag name (e.g. "v0.0.10").
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version

    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system))

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError('Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        # The zip contains bin/<platform>/<binary>; move it into place.
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    # Add execute permission for user/group/other on top of existing mode bits.
    os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal.

    Returns a tuple ``(service_principal_app_id, aad_session_key)``; the
    first element is ``False`` when the service principal could not be
    created after retrying.

    Fix vs. original: the first ``hook.add`` call passed the misspelled
    kwarg ``messsage=``, so its progress message was silently dropped.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD propagation can lag, so retry with linearly growing sleeps.
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # All attempts failed.
        return False, aad_session_key
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign *role* to a principal, retrying to ride out AAD propagation lag.

    Returns True on success (or when the assignment already exists), False
    when all retries were exhausted.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                # Idempotent: an existing assignment counts as success.
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            # Deliberate best-effort retry: swallow anything else and try again.
            pass
        time.sleep(delay + delay * x)  # linear backoff between attempts
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit ids or by filters.

    When no filter is given at all, asks for confirmation before deleting
    every assignment under the subscription (unless ``yes`` is set).

    Fix vs. original: ``assignee`` was listed twice in the ``any([...])``
    guard below; the duplicate is removed (no behavior change).
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        # Explicit ids are mutually exclusive with every other filter.
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return

    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)

    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)

    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a principal's role assignments, retrying to ride out AAD lag.

    Returns True on success, False when all retries were exhausted.
    Re-raises CLIError immediately since those are user-facing failures.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            # Not retryable: surface straight to the user.
            raise ex
        except CloudError as ex:
            # Transient ARM error; log and retry.
            logger.info(ex)
        time.sleep(delay + delay * x)  # linear backoff between attempts
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments filtered by scope, assignee and/or role.

    ``include_inherited`` also matches assignments whose scope is an
    ancestor of the requested scope; ``include_groups`` widens the assignee
    filter to group memberships.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)

    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())

    if assignments:
        # Post-filter client-side: keep exact-scope matches, and (when
        # include_inherited) assignments whose scope prefixes the target.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]

        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]

        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]

    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the Azure regions where ACS is available, keyed by release status."""
    return {
        "productionRegions": regions_in_prod,
        "previewRegions": regions_in_preview
    }
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the ARM agentPoolProfiles list, overlaying user profiles on defaults."""
    defaults = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # the 2017-07-01 API surface exposes additional agent settings
        defaults = _update_dict(defaults, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(defaults, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles
    pools = []
    for idx, profile in enumerate(agent_profiles):
        # honor a user-supplied dnsPrefix; otherwise derive one from idx to avoid duplicates
        overlay = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, profile)
        pools.append(_update_dict(defaults, overlay))
    return pools
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    # derive defaults that depend on the target subscription / resource group
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31" # 2017-01-31 applied to other locations
    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes requires a service principal for its cloud-provider integration
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # assemble a single-resource ARM deployment template for the container service
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        # the secret travels as a secureString parameter so it never lands in the template body
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # only retry errors that look like AAD propagation delays; anything else is fatal
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal (and secret) for a subscription in the CLI config dir.

    The whole file is rewritten with owner-only (0600) permissions.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # open with O_CREAT and mode 0600 so the secret is never world-readable
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service-principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Load the stored service-principal map from *config_path*.

    Best effort by design: returns None when the file is missing, unreadable,
    or corrupt, and never raises.
    """
    if not os.path.exists(config_path):
        return None
    try:
        # plain open() in a context manager cannot leak a raw fd the way
        # os.open + os.fdopen could if fdopen failed
        with open(config_path) as f:
            return shell_safe_json_parse(f.read())
    except Exception:  # pylint: disable=broad-except
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or apply an ARM deployment built from *template*/*parameters*.

    Returns the validation result when *validate* is true, otherwise the
    result of create_or_update (a poller, or the raw call when *no_wait*).
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
    # API versions 2019-10-01 and later wrap the properties in a Deployment model
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master.

    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file.
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use.
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, cluster_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig from the cluster master via scp and merge it into *path*.

    When *path* already exists, the remote config is fetched into a sibling
    '<path>-<name>-<ix>' file and then merged into *path*.
    """
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))
    # pick a candidate path that does not exist yet so nothing is clobbered
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)
    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)
    # merge things
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            # merge failure is non-fatal: the fetched config is preserved on disk
            logger.warning('Failed to merge credentials to kube config file: %s', exc)
            logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Parse *filename* as a YAML kubeconfig, mapping failures to CLIError.

    Raises CLIError for a missing or unparseable file; other OS errors
    propagate unchanged.
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except OSError as ex:
        # IOError has been an alias of OSError since Python 3.3, so a single
        # except clause covers both
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    Optionally renames the added context/cluster to *context_name*, renames
    admin contexts so they do not shadow user contexts, and warns when the
    resulting file is not chmod 600.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # fail early with the intended error: everything below dereferences the
    # parsed addition, so the previous late None-check could never be reached
    # when context_name was supplied (TypeError would fire first)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """Fetch the ContainerService object from the Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    svc_client = cf_container_services(cli_ctx, None)
    return svc_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an existing ACS cluster to *new_agent_count*."""
    cluster = client.get(resource_group_name, container_service_name)
    cluster.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    is_kubernetes = cluster.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes
    # null out the service principal because otherwise validation complains
    if is_kubernetes:
        cluster.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    cluster.windows_profile = None
    return client.create_or_update(resource_group_name, container_service_name, cluster)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    if resource_group_name:
        svc_iter = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        svc_iter = client.list()
    return list(svc_iter)
def show_service_principal(client, identifier):
    """Resolve *identifier* to an object id and fetch that service principal."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Register an AAD application and return (application, ocp-aad-session-key).

    Credentials may be a password or a key, not both (see
    _build_application_creds). Raises CLIError with setup guidance when the
    signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        # raw=True exposes the response headers that carry the session key
        result = client.create(app_create_param, raw=True)
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs on an existing AAD application.

    NOTE(review): display_name / homepage / identifier_uris /
    available_to_other_tenants / required_resource_accesses are accepted for
    signature parity with create_application but are not applied here.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Normalize credential inputs into (password_credentials, key_credentials).

    At most one of *password* / *key_value* may be given. String dates are
    parsed; the validity window defaults to one year starting now. Exactly one
    element of the returned pair is a non-empty list when a credential was
    supplied, otherwise both are None.
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')
    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)
    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    if password:
        creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                    key_id=str(uuid.uuid4()), value=password)]
        return (creds, None)
    if key_value:
        creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                               key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
        return (None, creds)
    return (None, None)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for *identifier* (app id, identifier URI, or object id)."""
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    app_id = identifier
    if resolve_app:
        try:
            uuid.UUID(identifier)
            matches = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Assign *role* to *assignee*, resolving the assignee only for service principals."""
    return _create_role_assignment(
        cli_ctx, role, assignee, resource_group_name, scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment for *assignee* at the computed ARM scope.

    :param role: role name or role-definition GUID.
    :param assignee: object id, or a principal identifier looked up in the
        graph when *resolve_assignee* is true.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # assignment names must be GUIDs unique within the scope
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to a graph object id."""
    client = get_graph_rbac_management_client(cli_ctx)
    matches = None
    if '@' in assignee:  # looks like a user principal name
        matches = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not matches:
        matches = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not matches:  # assume an object id, let us verify it
        matches = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not matches:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return matches[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Look up directory-object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
        for a in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of an AKS cluster through a local kubectl proxy.

    Requires kubectl on PATH and the kube-dashboard addon enabled on the
    cluster. Blocks running ``kubectl proxy`` until interrupted with CTRL+C.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles if k.lower() == 'kubeDashboard'.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    if not addon_profile.enabled:
        raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
                       'To use "az aks browse" first enable the add-on '
                       'by running "az aks enable-addons --addons kube-dashboard".\n'
                       'Starting with Kubernetes 1.19, AKS no longer support installation of '
                       'the managed kube-dashboard addon.\n'
                       'Please use the Kubernetes resources view in the Azure portal (preview) instead.')
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # bug fix: bytes.find() returns -1 (truthy) when the pattern is
            # absent, so the old truthiness test launched the fallback proxy
            # after ANY kubectl error; use membership instead
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # bug fix: close the port that was actually opened above rather
            # than a hard-coded 8001
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant the 'Monitoring Metrics Publisher' role on the cluster to the
    identity used by the monitoring addon.

    Prefers the cluster's service principal when one exists (client_id != 'msi');
    otherwise falls back to the omsagent addon's managed identity. Logs a
    warning (does not raise) when neither identity is found or when the role
    assignment cannot be created.

    :param result: Managed cluster object returned by create/update.
    :param cluster_resource_id: ARM resource id of the cluster (assignment scope).
    :param cmd: CLI command context supplying cli_ctx.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            ('omsagent' in result.addon_profiles) and
            (hasattr(result.addon_profiles['omsagent'], 'identity')) and
            (hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # Fix: the previous fragments concatenated to "roleassignment" (missing space).
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               uptime_sla=False,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               enable_node_public_ip=False,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               enable_managed_identity=False,
               attach_acr=None,
               enable_aad=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Builds a ManagedCluster model from the CLI arguments — agent pool,
    linux/windows profiles, service principal or managed identity, network,
    addon, AAD and API-server-access profiles — then submits it to the
    resource provider, retrying on service-principal replication errors.

    Returns the created cluster (or the in-progress operation when --no-wait
    applies). Raises CLIError on invalid argument combinations.
    """
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Resolve defaults: DNS prefix is derived from cluster/RG/subscription,
    # location falls back to the resource group's location.
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # Normalize VM set type and load balancer SKU for the requested k8s version.
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type,
        mode="System"
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    windows_profile = None
    if windows_admin_username or windows_admin_password:
        # To avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None:
            try:
                from knack.prompting import prompt
                windows_admin_username = prompt('windows_admin_username: ')
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise CLIError('Please specify username for Windows in non-interactive mode.')
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(
                    msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.')
        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)
    # Skip create service principal profile for the cluster if the cluster
    # enables managed identity and customer doesn't explicitly provide a service principal.
    service_principal_profile = None
    principal_obj = None
    if not(enable_managed_identity and not service_principal and not client_secret):
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal, client_secret=client_secret,
                                                      subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                      location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"),
            key_vault_secret_ref=None)
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. For now, We just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        if service_principal_profile is None:
            logger.warning('The cluster is an MSI cluster, please manually grant '
                           'Network Contributor role to the system assigned identity '
                           'after the cluster is created, see '
                           'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity')
        else:
            scope = vnet_subnet_id
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_profile.client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
            # Attach acr operation will be handled after the cluster is created
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)
    outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
    network_profile = None
    # Build a network profile only when any network argument was given; otherwise
    # fall back to kubenet (standard LB) or a bare basic-LB profile.
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        aci_subnet_name,
        vnet_subnet_id
    )
    monitoring = False
    if 'omsagent' in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    aad_profile = None
    if enable_aad:
        # Managed AAD is mutually exclusive with the legacy client/server app settings.
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
            if aad_tenant_id is None:
                profile = Profile(cli_ctx=cmd.cli_ctx)
                _, _, aad_tenant_id = profile.get_login_credentials()
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    api_server_access_profile = None
    if enable_private_cluster and load_balancer_sku.lower() != "standard":
        raise CLIError("Please use standard load balancer for private cluster")
    if api_server_authorized_ip_ranges or enable_private_cluster:
        api_server_access_profile = _populate_api_server_access_profile(
            api_server_authorized_ip_ranges,
            enable_private_cluster=enable_private_cluster
        )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    identity = None
    if enable_managed_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    mc = ManagedCluster(
        location=location,
        tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        api_server_access_profile=api_server_access_profile,
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id
    )
    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    # Add AAD session key to header.
    # If principal_obj is None, we will not add this header, this can happen
    # when the cluster enables managed identity. In this case, the header is useless
    # and that's OK to not add this header
    custom_headers = None
    if principal_obj:
        custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
            if need_pull_for_result:
                # adding a wait here since we rely on the result for role assignment
                result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
                    resource_group_name=resource_group_name,
                    resource_name=name,
                    parameters=mc))
            else:
                result = sdk_no_wait(no_wait,
                                     client.create_or_update,
                                     resource_group_name=resource_group_name,
                                     resource_name=name,
                                     parameters=mc,
                                     custom_headers=custom_headers)
            if monitoring:
                cloud_name = cmd.cli_ctx.cloud.name
                # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
                # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
                if cloud_name.lower() == 'azurecloud':
                    from msrestazure.tools import resource_id
                    cluster_resource_id = resource_id(
                        subscription=subscription_id,
                        resource_group=resource_group_name,
                        namespace='Microsoft.ContainerService', type='managedClusters',
                        name=name
                    )
                    _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
            if enable_managed_identity and attach_acr:
                if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <ClUSTER_NAME>-agentpool '
                                   'in MC_ resource group to give it permission to pull from ACR.')
                else:
                    kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
                    _ensure_aks_acr(cmd.cli_ctx,
                                    client_id=kubelet_identity_client_id,
                                    acr_name_or_id=attach_acr,
                                    subscription_id=subscription_id)
            return result
        except CloudError as ex:
            retry_exception = ex
            # Retry only on SPN replication latency; re-raise anything else.
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster and push the update."""
    managed_cluster = client.get(resource_group_name, name)
    sub_id = get_subscription_id(cmd.cli_ctx)
    managed_cluster = _update_addons(
        cmd,
        managed_cluster,
        sub_id,
        resource_group_name,
        addons,
        enable=False,
        no_wait=no_wait
    )
    # Persist the modified addon profiles on the cluster resource.
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, managed_cluster)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Enable addons on a managed cluster; for monitoring, also wait for the
    operation and grant the Monitoring Metrics Publisher role (public cloud only)."""
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
    monitoring_on = ('omsagent' in instance.addon_profiles and
                     instance.addon_profiles['omsagent'].enabled)
    if monitoring_on:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
        # Wait for completion: the role assignment below needs the result object.
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
        # MDM metrics are supported only in Azure public cloud, so the role
        # assignment is added only there.
        if cmd.cli_ctx.cloud.name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """Return the orchestrator versions available for managed clusters in *location*."""
    orchestrators = client.list_orchestrators(location, resource_type='managedClusters')
    return orchestrators
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch the cluster's kubeconfig (admin or user) and print or merge it into *path*."""
    fetch = client.list_cluster_admin_credentials if admin else client.list_cluster_user_credentials
    credentialResults = fetch(resource_group_name, name)
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Maps the user-facing addon names accepted on the command line to the
# addon-profile keys used by the AKS resource provider.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector',
    'kube-dashboard': 'kubeDashboard',
    'azure-policy': 'azurepolicy'
}
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally scoped to a single resource group."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Return the managed cluster with null-valued fields stripped from the output."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal or the AAD profile of a managed cluster."""
    # Exactly one of the two reset modes must be selected.
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait,
                           client.reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal, client_secret)
    # reset_aad path: the three app arguments are required; tenant id is optional.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    aad_parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait, client.reset_aad_profile, resource_group_name, name, aad_parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Set the node count of one node pool and push the updated cluster."""
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles
    # An explicit pool name is required when the cluster has multiple pools.
    if nodepool_name == "" and len(pools) > 1:
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    for pool in pools:
        selected = pool.name == nodepool_name or (nodepool_name == "" and len(pools) == 1)
        if not selected:
            continue
        if pool.enable_auto_scaling:
            raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
        pool.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               no_wait=False):
    """Update settings of an existing managed cluster.

    Supports toggling/updating the cluster autoscaler and its profile, the
    load balancer profile, ACR attach/detach, uptime SLA, API-server
    authorized IP ranges, managed AAD settings, and the Windows license
    type (AHUB). Raises CLIError when no update flag is given or when
    flags conflict; returns None early for no-op autoscaler requests.
    """
    # Booleans sum to the number of autoscaler flags passed; exactly one is allowed.
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
    # pylint: disable=too-many-boolean-expressions
    if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
            not update_lb_profile and
            not attach_acr and
            not detach_acr and
            not uptime_sla and
            api_server_authorized_ip_ranges is None and
            not enable_aad and
            not update_aad_profile and
            not enable_ahub and
            not disable_ahub):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or'
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or'
                       '"--load-balancer-outbound-ports" or'
                       '"--load-balancer-idle-timeout" or'
                       '"--attach-acr" or "--detach-acr" or'
                       '"--uptime-sla" or'
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub"')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # No-op: already enabled — warn and return without calling the service.
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    # if intention is to clear autoscaler profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Resolve the identity used for ACR role assignments: kubelet MSI for
    # SystemAssigned clusters, otherwise the cluster's service principal.
    client_id = ""
    if instance.identity is not None and instance.identity.type == "SystemAssigned":
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )
    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster to *kubernetes_version*, or upgrade node images only.

    With --node-image-only, each node pool's node image is upgraded one at a
    time (VirtualMachineScaleSets clusters only). Otherwise the control plane
    and — unless --control-plane-only is honored — all node pools are upgraded
    after interactive confirmation (skipped with --yes). Returns None when the
    user declines a prompt.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)
    # Detect legacy AvailabilitySet-based clusters; they cannot do node-image-only
    # upgrades and always upgrade node pools together with the control plane.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster" \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            _upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for a single node pool."""
    return sdk_no_wait(no_wait, client.upgrade_node_image_version,
                       resource_group_name, cluster_name, nodepool_name)
# Name and entry module of the CLI extension that implements the Dev Spaces
# commands; loaded on demand by the aks_use/remove_dev_spaces commands below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # Bail out silently when the dev-spaces extension cannot be installed/updated.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
    except TypeError:
        raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Bail out silently when the dev-spaces extension cannot be installed.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the cluster certificates of a managed cluster (async by default)."""
    rotate_op = client.rotate_cluster_certificates
    return sdk_no_wait(no_wait, rotate_op, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Enable or disable addons on an existing managed cluster instance.

    :param instance: ManagedCluster object, mutated in place and returned.
    :param addons: Comma-separated addon names (keys of the ADDONS map).
    :param enable: True to enable the listed addons, False to disable them.
    :param workspace_resource_id: Log Analytics workspace for the monitoring addon.
    :param subnet_name: Subnet name required by the aci-connector (virtual-node) addon.
    :raises CLIError: on unknown addon names, double-enable, or missing prerequisites.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}
    # only linux is supported for the ACI connector for now
    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # addon name is case insensitive
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == 'omsagent':
                if addon_profile.enabled:
                    # BUGFIX: added the missing space before 'before' so the message
                    # no longer renders as '...monitoring"before enabling...'.
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize the workspace resource ID: exactly one leading '/', no trailing '/'
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                # kube-dashboard may be implicitly present; create a disabled stub for it
                if addon == 'kubeDashboard':
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def _get_azext_module(extension_name, module_name):
    """Load and return the Python module backing an installed CLI extension."""
    try:
        # Adding the installed extension in the path
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        # Import the extension module
        import importlib
        return importlib.import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None, aci_subnet_name=None, vnet_subnet_id=None):
    """Translate the comma-separated ``--enable-addons`` string into addon profiles.

    Consumes recognized addon names from the list one by one; anything left over
    at the end is reported as unrecognized. Returns the (possibly pre-seeded)
    ``addon_profiles`` dict mapping addon profile name -> ManagedClusterAddonProfile.

    :raises CLIError: for unrecognized addon names or missing addon prerequisites.
    """
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)

        # normalize: exactly one leading '/', no trailing '/'
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles['omsagent'] = ManagedClusterAddonProfile(
            enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        addon_profiles['aciConnectorLinux'] = ManagedClusterAddonProfile(
            enabled=True,
            config={'SubnetName': aci_subnet_name}
        )
        addons.remove('virtual-node')

    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update the installed Dev Spaces extension and reload it.

    Returns True on success (a CLIError during update is logged and still counts
    as success); returns False when the extension is not installed or its module
    cannot be loaded after the update.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        # reload so the freshly updated code is the one that gets imported
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # non-fatal: log and fall through to the True return below
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure the given CLI extension is installed, installing it on demand.

    When ``update`` is truthy and the extension is already present, update it
    instead. Returns True/False for success/failure.
    """
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # Not installed yet: do a fresh install.
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        # Already installed and caller asked for the latest bits.
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the default Log Analytics workspace for the
    monitoring addon, creating the default resource group and workspace (in the
    region mapped from the cluster's resource group location) when they do not
    already exist.

    :raises CLIError: when the active cloud does not match the resource group's region.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    # fallbacks when the location is absent from every map
    workspace_region = "eastus"
    workspace_region_code = "EUS"

    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        # unknown/custom cloud: use the RG location verbatim
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)

    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            # workspace already exists; reuse it
            return resource.id
        except CloudError as ex:
            # 404 means the RG exists but the workspace does not: fall through and create it
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)

    # poll in 15-second intervals until the workspace deployment completes
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the Log Analytics workspace
    referenced by the monitoring addon's config, via an ARM template deployment.

    :param addon: addon profile whose config holds 'logAnalyticsWorkspaceResourceID'.
    :return: the result of the ARM deployment invocation.
    :raises CLIError: when the workspace resource ID cannot be parsed.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    if 'loganalyticsworkspaceresourceid' in addon.config:
        addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')

    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']

    # normalize: exactly one leading '/', no trailing '/'
    workspace_resource_id = workspace_resource_id.strip()

    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id

    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex

    # milliseconds since the Unix epoch, used to make deployment names unique
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)

    # pylint: disable=line-too-long
    # nested ARM template: the inner deployment installs the ContainerInsights
    # solution into the workspace's own subscription/resource group.
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }

    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }

    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or revoke, when detach=True) the cluster service principal pull
    access to an Azure Container Registry.

    ``acr_name_or_id`` may be a full resource ID or a bare registry name; a bare
    name is resolved by searching across all resource groups.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id

    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry directly from its resource ID.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Check if the ACR exists by name across all resource groups.
        try:
            registry = get_resource_by_name(cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
            raise CLIError(ex.message)

    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch a single agent pool of the given managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List all agent pools of the given managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      tags=None,
                      labels=None,
                      mode="User",
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    :raises CLIError: if a pool with the same name already exists, or the
        autoscaler flags are inconsistent.
    :return: the poller/result of the create_or_update call (per ``no_wait``).
    """
    # Reject duplicate pool names up front.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    # Split and trim the comma-separated taint list.
    # NOTE: the previous try/except ValueError around strip()/append() was dead
    # code — neither str.strip nor list.append raises ValueError — so it has
    # been removed; behavior is unchanged.
    taints_array = []
    if node_taints is not None:
        taints_array = [taint.strip() for taint in node_taints.split(',')]

    # Pick a sensible per-OS default VM size when the caller did not choose one.
    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        mode=mode
    )

    # Validates the autoscaler flags and stamps min/max counts onto the pool.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale a node pool to a new node count.

    Pools managed by the cluster autoscaler cannot be scaled manually, and a
    no-op scale (same count) is rejected.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    desired_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if desired_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = desired_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          no_wait=False):
    """Upgrade a node pool to a Kubernetes version, or refresh only its node image.

    A Kubernetes version upgrade already refreshes the node image, so passing
    both an explicit version and --node-image-only is rejected.
    """
    if kubernetes_version != '' and node_image_only:
        # BUGFIX: added the missing space between the two sentences, which
        # previously rendered as '...node image version.If you only...'.
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        managed_cluster_client = cf_managed_clusters(cmd.cli_ctx)
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      managed_cluster_client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         mode=None,
                         no_wait=False):
    """Update a node pool's autoscaler settings, tags, or mode.

    Exactly one of the three autoscaler flags may be given; at least one of the
    autoscaler flags, --tags, or --mode must be given. Already-enabled /
    already-disabled requests are warning no-ops that return None.
    """
    # bools sum to the number of autoscaler flags the caller supplied
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler

    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')

    if (update_autoscaler == 0 and not tags and not mode):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    # enable/update require both min and max counts; disable forbids them
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # no-op: warn instead of failing so scripts stay idempotent
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    instance.tags = tags

    if mode is not None:
        instance.mode = mode

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool after verifying (case-insensitively) that it exists."""
    existing_pools = client.list(resource_group_name, cluster_name)
    wanted = nodepool_name.lower()
    if not any(profile.name.lower() == wanted for profile in existing_pools):
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the upgrade profile for the given node pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create — or delete, when ``detach`` is True — the 'acrpull' role
    assignment for the given client on the given registry scope.

    :raises CLIError: when the role assignment operation fails (typically due
        to insufficient permissions on the subscription).
    """
    if detach:
        deleted = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return

    created = _add_role_assignment(cli_ctx,
                                   'acrpull',
                                   client_id,
                                   scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
    return
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Return a dict with 'service_principal', 'client_secret' and
    'aad_session_key', creating a new service principal (and secret) when
    none was supplied by the caller.

    :raises CLIError: when SP creation fails, or --service-principal was given
        without --client-secret.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # random salt keeps the homepage URL unique per invocation
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

        service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')

    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity provider for an OpenShift managed cluster.

    When ``create`` is True, creates (or updates, if one with the same
    identifier URI already exists) the backing AAD application with the
    required Graph permissions. Returns an
    OpenShiftManagedClusterAADIdentityProvider.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()

        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        # (well-known Graph permission GUID)
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")

        # resource_app_id is the fixed app ID of the AAD Graph API itself
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")

        # reuse an existing app registered with the same identifier URI, if any
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return a dict with 'service_principal' and 'client_secret', creating a
    new service principal (with a Contributor role assignment) when none was
    supplied by the caller.

    :raises CLIError: when SP creation fails, or --service-principal was given
        without --client-secret.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # random salt keeps the homepage URL unique per invocation
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

        service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')

    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The GET doubles as an existence check: it errors out when the group is missing.
    """
    groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return

    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # tolerate a concurrent creation of the same directory
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # 0o600: the kubeconfig holds credentials, keep it readable by the owner only
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass

    # merge the new kubeconfig into the existing one
    # (write to a temp file first so the merge helper can read it back by path)
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        # always clean up the temp file, even when the merge fails
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group,
    with often-empty null fields stripped from the results."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift managed cluster (OSA v3.11) with compute, infra and
    master agent pools, an Azure AD identity provider, and optional vnet
    peering and Log Analytics monitoring.

    :param location: region for the cluster; defaults to the resource group's location.
    :param workspace_id: Log Analytics workspace resource id; enables monitoring when set.
    """
    if location is None:
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute', # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    # Infra pool size and VM size are fixed; only the compute pool is configurable.
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra', # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master', # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Cluster does not exist yet, so a new AAD app may be created.
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
    osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
                                       aad_client_app_id=aad_client_app_id,
                                       aad_client_app_secret=aad_client_app_secret,
                                       aad_tenant_id=aad_tenant_id, identifier=None,
                                       name=name, create=create_aad,
                                       customer_admin_group_id=customer_admin_group_id)
    identity_providers.append(
        OpenShiftManagedClusterIdentityProvider(
            name='Azure AD',
            provider=osa_aad_identity
        )
    )
    auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        # Expand a bare vnet name into a full resource id in the same resource group.
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Re-run AAD setup now that the cluster's public hostname is known.
        _ensure_osa_aad(cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        # Translate the two "subscription not enabled" provider errors into a friendlier message.
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Fetch a single OpenShift managed cluster with null fields stripped."""
    cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OpenShift managed cluster to compute_count nodes.

    Falls back to the first agent pool when no pool named 'compute' exists
    (preserving the original behavior).
    """
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    idx = 0
    # enumerate instead of indexing by range(len(...)) — same search, idiomatic form.
    for i, pool_profile in enumerate(instance.agent_pool_profiles):
        if pool_profile.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OpenShift managed cluster using the given workspace."""
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True, workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OpenShift managed cluster."""
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False, workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
| 45.243971 | 222 | 0.652008 |
aceba621065840adbabb33b8877cd505c311228e | 7,591 | py | Python | dashboard/dashboard/dump_graph_json_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2019-05-18T02:43:02.000Z | 2019-05-18T02:43:02.000Z | dashboard/dashboard/dump_graph_json_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 5 | 2020-09-07T12:36:46.000Z | 2022-03-02T05:49:30.000Z | dashboard/dashboard/dump_graph_json_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2020-07-25T00:02:48.000Z | 2020-07-25T00:02:48.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import dump_graph_json
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import sheriff
class DumpGraphJsonTest(testing_common.TestCase):
  """Tests for the /dump_graph_json handler's serialized-entity output."""

  def setUp(self):
    super(DumpGraphJsonTest, self).setUp()
    app = webapp2.WSGIApplication([(
        '/dump_graph_json', dump_graph_json.DumpGraphJsonHandler)])
    self.testapp = webtest.TestApp(app)

  def testGet_DumpJson_Basic(self):
    # Insert a test with no rows or alerts.
    testing_common.AddTests('M', 'b', {'foo': {}})
    # When a request is made for this one test, three entities should
    # be returned: the Master, Bot and TestMetadata entities.
    response = self.testapp.get('/dump_graph_json', {'test_path': 'M/b/foo'})
    protobuf_strings = json.loads(response.body)
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    self.assertEqual(3, len(entities))
    masters = _EntitiesOfKind(entities, 'Master')
    bots = _EntitiesOfKind(entities, 'Bot')
    tests = _EntitiesOfKind(entities, 'TestMetadata')
    self.assertEqual('M', masters[0].key.string_id())
    self.assertEqual('b', bots[0].key.string_id())
    self.assertEqual('M/b/foo', tests[0].key.string_id())

  def testGet_DumpJson_WithRows(self):
    # Insert a test with rows.
    testing_common.AddTests('M', 'b', {'foo': {}})
    test_key = utils.TestKey('M/b/foo')
    test_container_key = utils.GetTestContainerKey(test_key)
    rows = []
    # The upper limit for revision numbers in this test; this was added
    # so that the test doesn't depend on the value of _DEFAULT_MAX_POINTS.
    highest_rev = 2000 + dump_graph_json._DEFAULT_MAX_POINTS - 1
    for rev in range(1000, highest_rev + 1):
      row = graph_data.Row(parent=test_container_key, id=rev, value=(rev * 2))
      rows.append(row)
    ndb.put_multi(rows)
    # There is a maximum number of rows returned by default, and the rows
    # are listed with latest revisions first.
    response = self.testapp.get(
        '/dump_graph_json',
        {'test_path': 'M/b/foo'})
    protobuf_strings = json.loads(response.body)
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    out_rows = _EntitiesOfKind(entities, 'Row')
    expected_num_rows = dump_graph_json._DEFAULT_MAX_POINTS
    self.assertEqual(expected_num_rows, len(out_rows))
    expected_rev_range = range(
        highest_rev, highest_rev + 1 - expected_num_rows, -1)
    for expected_rev, row in zip(expected_rev_range, out_rows):
      self.assertEqual(expected_rev, row.revision)
      self.assertEqual(expected_rev * 2, row.value)
    # Specifying end_rev sets the final revision.
    response = self.testapp.get(
        '/dump_graph_json',
        {'test_path': 'M/b/foo', 'end_rev': 1199})
    protobuf_strings = json.loads(response.body)
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    out_rows = _EntitiesOfKind(entities, 'Row')
    expected_num_rows = min(dump_graph_json._DEFAULT_MAX_POINTS, 200)
    self.assertEqual(expected_num_rows, len(out_rows))
    self.assertEqual(1199, out_rows[0].revision)
    # An alternative max number of rows can be specified.
    response = self.testapp.get(
        '/dump_graph_json',
        {'test_path': 'M/b/foo', 'num_points': 4})
    protobuf_strings = json.loads(response.body)
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    out_rows = _EntitiesOfKind(entities, 'Row')
    rev_nums = [row.revision for row in out_rows]
    # Materialize the range: on Python 3 a range object never compares equal
    # to a list, so assertEqual(range(...), rev_nums) would always fail.
    expected_rev_range = list(range(highest_rev, highest_rev - 4, -1))
    self.assertEqual(expected_rev_range, rev_nums)

  def testDumpJsonWithAlertData(self):
    testing_common.AddTests('M', 'b', {'foo': {}})
    test_key = utils.TestKey('M/b/foo')
    sheriff_key = sheriff.Sheriff(email='example@google.com').put()
    anomaly.Anomaly(sheriff=sheriff_key, test=test_key).put()
    # Anomaly entities for the requested test, as well as sheriffs for
    # the aforementioned Anomaly, should be returned.
    response = self.testapp.get(
        '/dump_graph_json',
        {'test_path': 'M/b/foo'})
    protobuf_strings = json.loads(response.body)
    self.assertEqual(5, len(protobuf_strings))
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    anomalies = _EntitiesOfKind(entities, 'Anomaly')
    sheriffs = _EntitiesOfKind(entities, 'Sheriff')
    self.assertEqual(1, len(anomalies))
    self.assertEqual(1, len(sheriffs))
    self.assertEqual('example@google.com', sheriffs[0].email)

  def testGet_DumpAnomaliesDataForSheriff(self):
    # Insert some test, sheriffs and alerts.
    testing_common.AddTests('M', 'b', {'foo': {}})
    testing_common.AddTests('M', 'b', {'bar': {}})
    test_key_foo = utils.TestKey('M/b/foo')
    test_key_bar = utils.TestKey('M/b/bar')
    test_con_foo_key = utils.GetTestContainerKey(test_key_foo)
    test_con_bar_key = utils.GetTestContainerKey(test_key_bar)
    chromium_sheriff = sheriff.Sheriff(
        id='Chromium Perf Sheriff', email='chrisphan@google.com').put()
    qa_sheriff = sheriff.Sheriff(
        id='QA Perf Sheriff', email='chrisphan@google.com').put()
    anomaly.Anomaly(sheriff=chromium_sheriff, test=test_key_foo).put()
    anomaly.Anomaly(sheriff=qa_sheriff, test=test_key_bar).put()
    default_max_points = dump_graph_json._DEFAULT_MAX_POINTS
    # Add some rows.
    rows = []
    for rev in range(1, default_max_points * 2):
      row = graph_data.Row(parent=test_con_foo_key, id=rev, value=(rev * 2))
      rows.append(row)
      row = graph_data.Row(parent=test_con_bar_key, id=rev, value=(rev * 2))
      rows.append(row)
    ndb.put_multi(rows)
    # Anomaly entities, Row entities, TestMetadata, and Sheriff entities for
    # parameter 'sheriff' should be returned.
    response = self.testapp.get(
        '/dump_graph_json',
        {'sheriff': 'Chromium Perf Sheriff'})
    protobuf_strings = json.loads(response.body)
    self.assertEqual(default_max_points + 5, len(protobuf_strings))
    entities = list(
        map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
    rows = _EntitiesOfKind(entities, 'Row')
    anomalies = _EntitiesOfKind(entities, 'Anomaly')
    sheriffs = _EntitiesOfKind(entities, 'Sheriff')
    self.assertEqual(default_max_points, len(rows))
    self.assertEqual(1, len(anomalies))
    self.assertEqual(1, len(sheriffs))
    self.assertEqual('Chromium Perf Sheriff', sheriffs[0].key.string_id())

  def testGet_NoTestPath_ReturnsError(self):
    # If no test path is given, an error is reported.
    self.testapp.get('/dump_graph_json', {}, status=500)

  def testGet_InvalidTestPath_ReturnsError(self):
    # If a wrong test path is given, JSON for an empty list is returned.
    response = self.testapp.get('/dump_graph_json', {'test_path': 'x'})
    self.assertEqual('[]', response.body)
def _EntitiesOfKind(entities, kind):
"""Returns a sublist of entities that are of a certain kind."""
return [e for e in entities if e.key.kind() == kind]
if __name__ == '__main__':
  # Allow running this module directly as a standalone unittest suite.
  unittest.main()
| 40.811828 | 78 | 0.71295 |
aceba6e7647071593734ab857ce1412d83fe6c4b | 2,479 | py | Python | questionbank/comments/tests/test_urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | 1 | 2018-04-17T23:58:46.000Z | 2018-04-17T23:58:46.000Z | questionbank/comments/tests/test_urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | 8 | 2019-12-04T23:08:00.000Z | 2022-02-13T22:48:26.000Z | questionbank/comments/tests/test_urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | null | null | null | from django.urls import reverse, resolve
def test_exam_list():
    """The exam comment list URL round-trips through reverse() and resolve()."""
    url = reverse('comments:exam_list', kwargs={'exam_id': 1})
    assert url == '/comments/exams/1/'
    match = resolve('/comments/exams/1/')
    assert match.view_name == 'comments:exam_list'
def test_exam_create():
    """The exam comment create URL round-trips through reverse() and resolve()."""
    url = reverse('comments:exam_create', kwargs={'exam_id': 1})
    assert url == '/comments/exams/1/create/'
    match = resolve('/comments/exams/1/create/')
    assert match.view_name == 'comments:exam_create'
def test_exam_update():
    """The exam comment update URL round-trips through reverse() and resolve()."""
    url = reverse('comments:exam_update', kwargs={'exam_id': 1, 'pk': 1})
    assert url == '/comments/exams/1/update/1/'
    match = resolve('/comments/exams/1/update/1/')
    assert match.view_name == 'comments:exam_update'
def test_exam_delete():
    """The exam comment delete URL round-trips through reverse() and resolve()."""
    url = reverse('comments:exam_delete', kwargs={'exam_id': 1, 'pk': 1})
    assert url == '/comments/exams/1/delete/1/'
    match = resolve('/comments/exams/1/delete/1/')
    assert match.view_name == 'comments:exam_delete'
def test_exam_resolve():
    """The exam comment resolve URL round-trips through reverse() and resolve()."""
    url = reverse('comments:exam_resolve', kwargs={'exam_id': 1, 'pk': 1})
    assert url == '/comments/exams/1/resolve/1/'
    match = resolve('/comments/exams/1/resolve/1/')
    assert match.view_name == 'comments:exam_resolve'
def test_question_list():
    """The question comment list URL round-trips through reverse() and resolve()."""
    url = reverse('comments:question_list', kwargs={'question_id': 1})
    assert url == '/comments/questions/1/'
    match = resolve('/comments/questions/1/')
    assert match.view_name == 'comments:question_list'
def test_question_create():
    """The question comment create URL round-trips through reverse() and resolve()."""
    url = reverse('comments:question_create', kwargs={'question_id': 1})
    assert url == '/comments/questions/1/create/'
    match = resolve('/comments/questions/1/create/')
    assert match.view_name == 'comments:question_create'
def test_question_update():
    """The question comment update URL round-trips through reverse() and resolve()."""
    url = reverse('comments:question_update', kwargs={'question_id': 1, 'pk': 1})
    assert url == '/comments/questions/1/update/1/'
    match = resolve('/comments/questions/1/update/1/')
    assert match.view_name == 'comments:question_update'
def test_question_delete():
    """The question comment delete URL round-trips through reverse() and resolve()."""
    url = reverse('comments:question_delete', kwargs={'question_id': 1, 'pk': 1})
    assert url == '/comments/questions/1/delete/1/'
    match = resolve('/comments/questions/1/delete/1/')
    assert match.view_name == 'comments:question_delete'
def test_question_resolve():
    """The question comment resolve URL round-trips through reverse() and resolve()."""
    url = reverse('comments:question_resolve', kwargs={'question_id': 1, 'pk': 1})
    assert url == '/comments/questions/1/resolve/1/'
    match = resolve('/comments/questions/1/resolve/1/')
    assert match.view_name == 'comments:question_resolve'
| 29.511905 | 87 | 0.652683 |
aceba6eb721bfc18c9dad30cbd93a80f98da07c9 | 5,240 | py | Python | learntools/deep_learning/exercise_3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 359 | 2018-03-23T15:57:52.000Z | 2022-03-25T21:56:28.000Z | learntools/deep_learning/exercise_3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 84 | 2018-06-14T00:06:52.000Z | 2022-02-08T17:25:54.000Z | learntools/deep_learning/exercise_3.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 213 | 2018-05-02T19:06:31.000Z | 2022-03-20T15:40:34.000Z | import numpy as np
import os
from os.path import join
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from learntools.deep_learning.decode_predictions import decode_predictions
from learntools.core import *
class IsHotDog(CodingProblem):
    """Checks the learner's `is_hot_dog` function against known predictions."""
    _vars = ['is_hot_dog', 'preds']
    _hint = "Save the results of `decode_predictions(preds)`. The label for each item d in the resulting list is at d[0][1]"
    _solution = CS(
"""
def is_hot_dog(preds):
    decoded = decode_predictions(preds, top=1)
    # pull out predicted label, which is in d[0][1] due to how decode_predictions structures results
    labels = [d[0][1] for d in decoded]
    out = [l == 'hotdog' for l in labels]
    return out
"""
    )
    def check(self, is_hot_dog, preds):
        output = is_hot_dog(preds)
        # `output is not None` instead of the type(...) != type(None) anti-pattern.
        assert (output is not None), ("You don't have a return statement in `is_hot_dog`")
        assert (output == [True, True, False, False]), ("Expected output is [True, True, False, False]. Actual output of is_hot_dog was {}".format(output))
class ScoreHotDogModel(CodingProblem):
    """Checks the learner's `calc_accuracy` function by scoring a model on a
    larger image set and asserting the returned accuracy is plausible."""
    _vars = ['calc_accuracy', 'my_model']
    _hint = ("Make predictions for the hotdog images. See which are labeled as hot dogs and sum up the number correct. "
             "Then do the same for the other images. Total up the number correct, and divide it by the total number of images. "
             "The returned value from your function should be a single number.")
    _solution = CS(
"""
def calc_accuracy(model, paths_to_hotdog_images, paths_to_other_images):
    # We'll use the counts for denominator of accuracy calculation
    num_hot_dog_images = len(paths_to_hotdog_images)
    num_other_images = len(paths_to_other_images)
    hotdog_image_data = read_and_prep_images(paths_to_hotdog_images)
    preds_for_hotdogs = model.predict(hotdog_image_data)
    # Summing list of binary variables gives a count of True values
    num_correct_hotdog_preds = sum(is_hot_dog(preds_for_hotdogs))
    other_image_data = read_and_prep_images(paths_to_other_images)
    preds_other_images = model.predict(other_image_data)
    # Number correct is the number judged not to be hot dogs
    num_correct_other_preds = num_other_images - sum(is_hot_dog(preds_other_images))
    total_correct = num_correct_hotdog_preds + num_correct_other_preds
    total_preds = num_hot_dog_images + num_other_images
    return total_correct / total_preds
""")
    def check(self, calc_accuracy, my_model):
        # Reference accuracy used only in the failure message below.
        correct_acc = 0.85
        print("Testing model on larger dataset. This takes a few seconds. \n\n")
        paths_to_hodog_images, paths_to_other_images = get_paths_for_testing()
        acc = calc_accuracy(my_model, paths_to_hodog_images, paths_to_other_images)
        assert (acc is not None), ("Your function did not return a value. It should return the accuracy")
        assert (acc<=1), ("Your function should return a number between 0 and 1 (a fraction correct). Instead it returned {}".format(acc))
        assert (acc > 0.5), ("Expected a returned value of around {}. Your function returned {}".format(correct_acc, acc))
        print("Larger dataset model accuracy: {}".format(acc))
class TryVGG(CodingProblem):
    """Checks that the learner loaded VGG16 and scored it on the small dataset."""
    _vars = ['vgg16_accuracy', 'vgg16_model', 'calc_accuracy']
    _hint = ("One line of your code is `vgg16_model = VGG16(weights='../input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5')`. ")
    _solution = CS(
"""
from tensorflow.keras.applications import VGG16
vgg16_model = VGG16(weights='../input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5')
vgg16_accuracy = calc_accuracy(vgg16_model, hot_dog_paths, not_hot_dog_paths)
"""
    )
    def check(self, vgg16_accuracy, vgg16_model, calc_accuracy):
        assert (len(vgg16_model.layers) == 23), ("It doesn't appear you've loaded vgg16_model correctly")
        # Bug fix: the failure message had a {} placeholder but .format() was
        # called with no argument, raising IndexError instead of the message.
        assert (vgg16_accuracy > 0.9), ("vgg16_accuracy on small dataset was expected "
                                        "to be 1 but you had a value of {}".format(vgg16_accuracy))
        print("Testing VGG16 on a larger dataset. This can take a few seconds\n\n")
        paths_to_hodog_images, paths_to_other_images = get_paths_for_testing()
        acc = calc_accuracy(vgg16_model, paths_to_hodog_images, paths_to_other_images)
        print("Accuracy of VGG16 on larger dataset is {}".format(acc))
# Instantiate the exercise checkers and bind them into this module as q_1, q_2, ...
qvars = bind_exercises(globals(), [
    IsHotDog,
    ScoreHotDogModel,
    TryVGG
    ],
    var_format='q_{n}',
    )
# Export only the generated question variables.
__all__ = list(qvars)
# Utility functions called check methods are below this line
def get_paths_for_testing(hot_dog_image_dir='../input/hot-dog-not-hot-dog/seefood/train/hot_dog',
                          not_hot_dog_image_dir='../input/hot-dog-not-hot-dog/seefood/train/not_hot_dog'):
    """Return (hot_dog_paths, other_paths): up to 20 image paths from each directory,
    in os.listdir order."""
    limit = 20
    hd_paths = [join(hot_dog_image_dir, fname)
                for fname in os.listdir(hot_dog_image_dir)][:limit]
    other_paths = [join(not_hot_dog_image_dir, fname)
                   for fname in os.listdir(not_hot_dog_image_dir)][:limit]
    return hd_paths, other_paths
| 44.786325 | 155 | 0.724046 |
aceba7693f274d43e4292d24a2f3b38ce550cc2d | 954 | py | Python | plantcv/plantcv/hyperspectral/__init__.py | bganglia/plantcv | e6d28862d959811e941a319086801593d3a081b1 | [
"MIT"
] | null | null | null | plantcv/plantcv/hyperspectral/__init__.py | bganglia/plantcv | e6d28862d959811e941a319086801593d3a081b1 | [
"MIT"
] | null | null | null | plantcv/plantcv/hyperspectral/__init__.py | bganglia/plantcv | e6d28862d959811e941a319086801593d3a081b1 | [
"MIT"
] | null | null | null | from plantcv.plantcv.hyperspectral.read_data import _find_closest
from plantcv.plantcv.hyperspectral.read_data import _make_pseudo_rgb
from plantcv.plantcv.hyperspectral.read_data import read_data
from plantcv.plantcv.hyperspectral.extract_wavelength import extract_wavelength
from plantcv.plantcv.hyperspectral.extract_index import extract_index
from plantcv.plantcv.hyperspectral.analyze_index import analyze_index
from plantcv.plantcv.hyperspectral.analyze_spectral import analyze_spectral
from plantcv.plantcv.hyperspectral.calibrate import calibrate
from plantcv.plantcv.hyperspectral._avg_reflectance import _avg_reflectance
from plantcv.plantcv.hyperspectral._inverse_covariance import _inverse_covariance
# add new functions to end of lists
__all__ = ["read_data", "_find_closest", "extract_index", "analyze_spectral", "analyze_index", "calibrate",
"_make_pseudo_rgb", "extract_wavelength", "_avg_reflectance", "_inverse_covariance"]
| 63.6 | 107 | 0.858491 |
aceba79ad4673789ebdec5ab4159dc22caa4b586 | 1,022 | py | Python | src/dacirco/client/client.py | albertoblanc/dacirco | 965a2e4ad49ec7754eb42442a570bf6d1bf00e89 | [
"MIT"
] | null | null | null | src/dacirco/client/client.py | albertoblanc/dacirco | 965a2e4ad49ec7754eb42442a570bf6d1bf00e89 | [
"MIT"
] | null | null | null | src/dacirco/client/client.py | albertoblanc/dacirco | 965a2e4ad49ec7754eb42442a570bf6d1bf00e89 | [
"MIT"
] | null | null | null | import click
import grpc
from dacirco.proto.dacirco_pb2 import TCRequest
from dacirco.proto.dacirco_pb2_grpc import DaCircogRPCServiceStub
@click.command("cli", context_settings={"show_default": True})
@click.option("--input-video", default="test-1-10s.mov", help="The video id")
@click.option("--output-video", default="out-test-1-10s.mov", help="The video id")
@click.option("--rate", default=7000, help="The desired bitrate")
@click.option("--speed", default="fast", help="The desired speed")
def submit_request(input_video: str, output_video: str, rate: int, speed: str):
    """Send one transcoding request to the local DaCirco gRPC server and print the result."""
    request = TCRequest(
        input_video=input_video,
        bitrate=rate,
        speed=speed,
        output_video=output_video,
    )
    with grpc.insecure_channel("localhost:50051") as channel:
        response = DaCircogRPCServiceStub(channel).submit_request(request)
    print(f"TC client received: {response.success}")
if __name__ == "__main__":
    # click parses the command-line options when invoked as a script.
    submit_request()
| 34.066667 | 82 | 0.670254 |
aceba7afc4531cd587cc2521b94fb77b054516f2 | 859 | py | Python | registers/migrations/0008_auto_20181026_1110.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | registers/migrations/0008_auto_20181026_1110.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | registers/migrations/0008_auto_20181026_1110.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.9 on 2018-10-26 03:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Removes the ChangeApproval model (fields first, then the model) and
    replaces ChangeRequest.status choices with a new workflow set."""

    dependencies = [
        ('registers', '0007_auto_20181023_1338'),
    ]

    operations = [
        # Foreign-key fields must be removed before the model itself is deleted.
        migrations.RemoveField(
            model_name='changeapproval',
            name='approver',
        ),
        migrations.RemoveField(
            model_name='changeapproval',
            name='change_request',
        ),
        migrations.AlterField(
            model_name='changerequest',
            name='status',
            field=models.SmallIntegerField(choices=[(0, 'Draft'), (1, 'Submitted for endorsement'), (2, 'Scheduled for CAB'), (3, 'Ready'), (4, 'Complete'), (5, 'Rolled back')], db_index=True, default=0),
        ),
        migrations.DeleteModel(
            name='ChangeApproval',
        ),
    ]
| 28.633333 | 204 | 0.575087 |
aceba81782426fcd1372f2411c213e0f16ff81d3 | 2,772 | py | Python | Empirical_Roofline_Tool-1.1.0/Scripts/summary.py | uo-cdux/ert-mirror | 09663aad764d3c6cefa4d9de1f1213b5f03af9df | [
"BSD-3-Clause-LBNL"
] | null | null | null | Empirical_Roofline_Tool-1.1.0/Scripts/summary.py | uo-cdux/ert-mirror | 09663aad764d3c6cefa4d9de1f1213b5f03af9df | [
"BSD-3-Clause-LBNL"
] | null | null | null | Empirical_Roofline_Tool-1.1.0/Scripts/summary.py | uo-cdux/ert-mirror | 09663aad764d3c6cefa4d9de1f1213b5f03af9df | [
"BSD-3-Clause-LBNL"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function,division,unicode_literals
import os,sys,math
from util import PRECISION
def smooth(x, y, d=0):
  """Return (xs, ys) where ys is y smoothed with a centered moving average.

  d is the window radius; the window is clipped at the ends of the list.
  The default d=0 preserves the original behavior (ys is a float copy of y).
  Values are averaged from the unmodified input, not from partially-smoothed
  output, and the inputs are never mutated.
  """
  xs = x[:]
  src = y[:]
  ys = []
  for i in range(len(src)):
    lo = max(0, i - d)
    hi = min(len(src), i + d + 1)
    ys.append(sum(src[lo:hi]) / float(hi - lo))
  return xs, ys
# Read the raw ERT output from stdin and split it into per-precision sections.
lines = os.sys.stdin.readlines()
begin = 0
end = 0
data = dict()
for i in range(0,len(lines)):
  m = lines[i].split()
  # A section boundary is a blank line or a lone precision keyword (fp16/fp32/fp64).
  if (len(m) == 1 and m[0] in PRECISION.__members__) or len(m) == 0:
    end = i
    if end > begin:
      pkey = int(precision.value)
      if pkey not in data:
        data[pkey] = lines[begin:end]
    if len(m):
      precision = PRECISION[m[0]]
    begin = i+1
  if lines[i] == "META_DATA\n":
    break
meta_lines = lines[i:]
for pkey in sorted(data.keys()):
  temp_lines = data[pkey]
  # Bug fix: the original `pkey == fp64.value or fp32.value or fp16.value`
  # was always true (the bare .value operands are truthy); test membership.
  if pkey in (PRECISION.fp64.value, PRECISION.fp32.value, PRECISION.fp16.value):
    lines = temp_lines
    # Column 9 holds GFLOPs; report the peak for this precision.
    gflops = [float(line.split()[9].strip(',')) for line in temp_lines]
    maxgflops = max(gflops)
    print(" %7.2f" % maxgflops, end=' ')
    init = PRECISION(pkey).name.upper()
    print(init, end=' ')
    print("GFLOPs")
    print()
    # Column 0 is the working-set size, column 6 the measured bandwidth.
    x = [float(line.split()[0].strip(',').strip('(')) for line in lines]
    band = [float(line.split()[6].strip(',')) for line in lines]
    # Trapezoidal integral of bandwidth over log(size), used as a weight.
    weight = 0.0
    for i in range(0,len(x)-1):
      x1 = math.log(x[i])
      y1 = band[i]
      x2 = math.log(x[i+1])
      y2 = band[i+1]
      weight += (y1+y2)/2.0 * (x2-x1)
    # Drop everything before the bandwidth peak.
    maxband = max(band)
    start = band.index(maxband)
    x = x[start:]
    band = band[start:]
    minband = min(band)
    maxband = max(band)
    # Histogram the bandwidths into `samples` bins, counting values within
    # +/- 5% of each bin's center.
    fraction = 1.05
    samples = 10000
    dband = maxband/float(samples - 1)
    counts = samples*[0]
    totals = samples*[0.0]
    x,band = smooth(x,band)
    for i in range(0,samples):
      cband = i*dband
      for v in band:
        if v >= cband/fraction and v <= cband*fraction:
          totals[i] += v
          counts[i] += 1
    # Scan from high bandwidth down, merging nearby plateaus into cache levels.
    band_list = [[1000*maxband,1000]]
    maxc = -1
    maxi = -1
    for i in range(samples-3,1,-1):
      if counts[i] > 6:
        if counts[i] > maxc:
          maxc = counts[i]
          maxi = i
      else:
        if maxc > 1:
          value = float(totals[maxi])/max(1,counts[maxi])
          # A plateau at least 15% below the previous level starts a new entry.
          if 1.15*value < float(band_list[-1][0])/band_list[-1][1]:
            band_list.append([totals[maxi],counts[maxi]])
          else:
            band_list[-1][0] += totals[maxi]
            band_list[-1][1] += counts[maxi]
        maxc = -1
        maxi = -1
    print(" %7.2f Weight" % weight)
    print()
    # Label detected levels L1..Ln from fastest down, with DRAM last.
    band_name_list = ["DRAM"]
    cache_num = len(band_list)-1
    for cache in range(1,cache_num+1):
      band_name_list = ["L%d" % (cache_num+1 - cache)] + band_name_list
    for (band,band_name) in zip(band_list,band_name_list):
      print("  %7.2f %s" % (float(band[0])/band[1],band_name))
    print()
# Echo the metadata section unchanged.
for m in meta_lines:
  print(m, end=' ')
| 21 | 82 | 0.601732 |
aceba825bd8397bac60b4f65a54d3522add3ef1a | 1,786 | py | Python | oneFeature/wineplotHist.py | allys-99/class6hw | b46d82efb17188bfcafe18d96fcc4cd8debb9f4a | [
"MIT"
] | null | null | null | oneFeature/wineplotHist.py | allys-99/class6hw | b46d82efb17188bfcafe18d96fcc4cd8debb9f4a | [
"MIT"
] | null | null | null | oneFeature/wineplotHist.py | allys-99/class6hw | b46d82efb17188bfcafe18d96fcc4cd8debb9f4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def WineMain():
    """Load the wine data set, print it with summary statistics, and plot histograms."""
    table = LoadDataSet()
    print(table)
    getSS(table)
    PlotFeatures(table)
def getSS(myTable):
    """Print the mean and standard deviation of every column of myTable."""
    summaries = (('\t\t\t\t Mean', myTable.mean()),
                 ('\t\t\t Standard Deviation', myTable.std()))
    for heading, values in summaries:
        print(heading)
        print(values)
def LoadDataSet():
    """Read the wine data set from wine.data.txt (comma-separated, headerless)
    and return it as a DataFrame with named columns.

    Column 'C' is the cultivator class label (presumably 1-3 per the UCI wine
    data set — TODO confirm); the rest are chemical measurements.
    """
    df = pd.read_table("wine.data.txt", delimiter=",",header=None)
    df.columns =['C','Alcohol','Malic Acid','Ash','Alcalinity of Ash',
                 'Magnesium','Total Phenols','Flavanoids','Nonflavanoid Phenols',
                 'Proanthocyanins','Colour Intensity','Hue','OD280_OD315 of diluted wines','Proline']
    return df
def PlotFeatures(df):
    """Draw one three-class histogram per measured feature.

    The first entry of the module-level ``characteristics`` list is the
    class label itself, so iteration starts at index 1.
    """
    per_class = [df.loc[df['C'] == label] for label in (1, 2, 3)]
    for feat in characteristics[1:]:
        columns = [subset.loc[:, feat] for subset in per_class]
        PlotHistogram(columns[0], columns[1], columns[2], feat)
def PlotHistogram(c1, c2, c3, v):
    """Show and save a histogram of feature ``v`` for the three cultivator
    classes, written to ``hist_<v>.png`` after the window is closed."""
    plt.xlabel(v)
    plt.ylabel('number of samples')
    plt.title(v + ' in Wine')
    plt.hist([c1, c2, c3], bins=30, rwidth=0.8,
             color=['green', 'yellow', 'orange'], label=['c1', 'c2', 'c3'])
    plt.legend()
    figure = plt.gcf()
    plt.draw()
    plt.show()
    # Persist the current figure only after it has been displayed.
    figure.savefig("hist_" + v + ".png")
# Script body: print a banner, configure numpy float formatting for the
# summary output, declare the dataset's column names, and run the analysis.
print ("\n . . . . . W I N E . . . . . \n")
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
# First entry is the class label; the remaining 13 are measured features.
characteristics = ['cultivator','Alcohol', 'Malic Acid', 'Ash', 'Alcalinity of Ash',
'Magnesium', 'Total Phenols', 'Flavanoids', 'Nonflavanoid Phenols',
'Proanthocyanins', 'Colour Intensity', 'Hue',
'OD280_OD315 of diluted wines', 'Proline']
WineMain()
| 29.278689 | 102 | 0.609742 |
acebaa9488cfa52490236e1b86826bcdd72f01d2 | 4,538 | py | Python | pinakes/main/approval/services/create_action.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 2 | 2022-03-17T18:53:58.000Z | 2022-03-17T22:04:22.000Z | pinakes/main/approval/services/create_action.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 9 | 2022-03-18T08:22:57.000Z | 2022-03-30T17:14:49.000Z | pinakes/main/approval/services/create_action.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 7 | 2022-03-17T22:03:08.000Z | 2022-03-28T21:28:34.000Z | """Service to create various types of action"""
from django.utils.translation import gettext_lazy as _
from pinakes.main.approval.models import (
Action,
Request,
)
from pinakes.main.approval.exceptions import (
InvalidStateTransitionException,
BlankParameterException,
)
class CreateAction:
    """Service class to create actions.

    Validates the requested operation against the current Request state,
    persists an Action record, and (for state-changing operations) forwards
    the resulting state/decision to UpdateRequest.
    """

    def __init__(self, request, options):
        # ``request`` may be either a Request instance or a request id;
        # normalise it to an instance.
        self.options = options
        self.request = (
            request
            if isinstance(request, Request)
            else Request.objects.get(id=request)
        )
        self.action = None

    def process(self):
        """Run the operation named in ``options['operation']``.

        Dispatches to the matching ``_<operation>`` validator below; the
        validator raises on an invalid transition and returns the options
        (possibly empty) to apply to the owning Request. Returns ``self``.
        """
        # Imported here to avoid a circular import with update_request.
        from pinakes.main.approval.services.update_request import (
            UpdateRequest,
        )

        operation = self.options["operation"].lower()
        request_options = getattr(self, f"_{operation}")(
            self.options.get("comments")
        )
        self.options["request"] = self.request
        self.options["tenant"] = self.request.tenant
        self.action = Action.objects.create(**self.options)
        # An empty dict means the action does not change the request state.
        if request_options:
            UpdateRequest(self.request, request_options).process()
        return self

    def _memo(self, comments):
        # Memo only records a comment; it never changes the request state.
        if not comments:
            raise BlankParameterException(
                _("The memo message cannot be blank")
            )
        return {}

    def _start(self, _comments):
        # PENDING -> STARTED
        if not self.request.state == Request.State.PENDING:
            raise InvalidStateTransitionException(
                _("Current request is not pending state")
            )
        return {"state": Request.State.STARTED}

    def _notify(self, _comments):
        # STARTED -> NOTIFIED
        if not self.request.state == Request.State.STARTED:
            raise InvalidStateTransitionException(
                _("Current request is not started state")
            )
        return {"state": Request.State.NOTIFIED}

    def _skip(self, _comments):
        # PENDING -> SKIPPED
        if not self.request.state == Request.State.PENDING:
            raise InvalidStateTransitionException(
                _("Current request is not in pending state")
            )
        return {"state": Request.State.SKIPPED}

    def _approve(self, comments):
        # NOTIFIED -> COMPLETED/APPROVED; only leaf (child) requests can be
        # approved directly.
        if not self.request.state == Request.State.NOTIFIED:
            raise InvalidStateTransitionException(
                _("Current request is not in notified state")
            )
        if self.request.is_parent():
            raise InvalidStateTransitionException(
                _("Only child level request can be approved")
            )
        return {
            "state": Request.State.COMPLETED,
            "decision": Request.Decision.APPROVED,
            "reason": comments,
        }

    def _deny(self, comments):
        # NOTIFIED -> COMPLETED/DENIED; denial requires a reason and only
        # applies to leaf (child) requests.
        if not self.request.state == Request.State.NOTIFIED:
            raise InvalidStateTransitionException(
                _("Current request is not in notified state")
            )
        if self.request.is_parent():
            raise InvalidStateTransitionException(
                _("Only child level request can be denied")
            )
        if not comments:
            raise BlankParameterException(
                _("A reason has to be provided if a request is being denied")
            )
        return {
            "state": Request.State.COMPLETED,
            "decision": Request.Decision.DENIED,
            "reason": comments,
        }

    def _cancel(self, comments):
        # Cancellation is only valid on an unfinished root request.
        if not self.request.is_root():
            raise InvalidStateTransitionException(
                _("Only root level request can be canceled")
            )
        if self.request.has_finished():
            raise InvalidStateTransitionException(
                _("The request has already finished")
            )
        return {
            "state": Request.State.CANCELED,
            "decision": Request.Decision.CANCELED,
            "reason": comments,
        }

    def _error(self, comments):
        # Mark an unfinished child request as FAILED; a failure reason is
        # mandatory.
        if self.request.has_finished():
            raise InvalidStateTransitionException(
                _("Current request has already finished")
            )
        if not comments:
            raise BlankParameterException(_("Failure reason is missing"))
        if self.request.is_parent():
            raise InvalidStateTransitionException(
                _("Only child level request can be flagged error")
            )
        return {
            "state": Request.State.FAILED,
            "decision": Request.Decision.ERROR,
            "reason": comments,
        }
| 29.855263 | 77 | 0.582856 |
acebab9405a12026830629d291324ebba48373e3 | 668 | py | Python | analysis/graph_plot.py | rafilevy/PartII_Project | acbdadb9a228f5a6202996affcb24a302ebd1f5f | [
"MIT"
] | null | null | null | analysis/graph_plot.py | rafilevy/PartII_Project | acbdadb9a228f5a6202996affcb24a302ebd1f5f | [
"MIT"
] | null | null | null | analysis/graph_plot.py | rafilevy/PartII_Project | acbdadb9a228f5a6202996affcb24a302ebd1f5f | [
"MIT"
] | null | null | null | import argparse
import plotly.express as px
import pandas as pd
from sys import argv
# Command-line interface: a positional CSV path plus optional axis styling.
parser = argparse.ArgumentParser(description='Plot a graph')
parser.add_argument("path", type=str)
for flag, kind in (("--dtick_x", float), ("--dtick_y", float),
                   ("--title", str), ("--x_label", str), ("--y_label", str)):
    parser.add_argument(flag, type=kind)
args = vars(parser.parse_args())

# skiprows=15 skips the file's leading header block (presumably instrument
# metadata from a scope export — verify against the capture format).
df = pd.read_csv(args["path"], skiprows=15)
fig = px.line(df, y="CH1", x="TIME")
fig.update_yaxes(title_text=args["y_label"], dtick=args["dtick_y"])
fig.update_xaxes(title_text=args["x_label"], dtick=args["dtick_x"])
fig.show()
acebab95c767f2ed45a4d20efaf294df6a85d0aa | 6,633 | py | Python | data_utils/DistributedSampler.py | S-HuaBomb/MASTER-Paddle | d2d1fcd144f16253fb83cbb7b61f6aa95ace4d8b | [
"MIT"
] | 1 | 2021-10-10T13:22:39.000Z | 2021-10-10T13:22:39.000Z | data_utils/DistributedSampler.py | S-HuaBomb/MASTER-paddle | d2d1fcd144f16253fb83cbb7b61f6aa95ace4d8b | [
"MIT"
] | 2 | 2021-11-16T07:35:32.000Z | 2021-11-16T08:48:02.000Z | data_utils/DistributedSampler.py | S-HuaBomb/MASTER-paddle | d2d1fcd144f16253fb83cbb7b61f6aa95ace4d8b | [
"MIT"
] | null | null | null | #encoding=utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from typing import TypeVar, Optional, Iterator

import paddle
import paddle.distributed as dist
from paddle.io import Sampler, Dataset
class DistributedSampler(Sampler):
    r"""Sampler that restricts data loading to a subset of the dataset.

    It is especially useful for distributed training: each process passes a
    :class:`DistributedSampler` instance as the DataLoader sampler and loads
    a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Args:
        dataset: Dataset used for sampling.
        num_replicas (int, optional): Number of processes participating in
            distributed training. By default retrieved from the current
            distributed group.
        rank (int, optional): Rank of the current process within
            :attr:`num_replicas`. By default retrieved from the current
            distributed group.
        shuffle (bool, optional): If ``True`` (default), sampler will shuffle
            the indices.
        seed (int, optional): random seed used to shuffle the sampler if
            :attr:`shuffle=True`. This number should be identical across all
            processes in the distributed group. Default: ``0``.
        drop_last (bool, optional): if ``True``, then the sampler will drop
            the tail of the data to make it evenly divisible across the
            number of replicas. If ``False``, the sampler will add extra
            indices to make the data evenly divisible across the replicas.
            Default: ``False``.

    .. warning::
        In distributed mode, call :meth:`set_epoch` at the beginning of each
        epoch **before** creating the :class:`DataLoader` iterator to make
        shuffling work properly across multiple epochs. Otherwise, the same
        ordering will always be used.

    Example::

        >>> sampler = DistributedSampler(dataset) if is_distributed else None
        >>> loader = DataLoader(dataset, shuffle=(sampler is None),
        ...                     sampler=sampler)
        >>> for epoch in range(start_epoch, n_epochs):
        ...     if is_distributed:
        ...         sampler.set_epoch(epoch)
        ...     train(loader)
    """

    def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None, rank: Optional[int] = None,
                 shuffle: bool = True, seed: int = 0, drop_last: bool = False) -> None:
        super().__init__()
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        if rank >= num_replicas or rank < 0:
            raise ValueError(
                "Invalid rank {}, rank should be in the interval"
                " [0, {}]".format(rank, num_replicas - 1))
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then
        # there is no need to drop any data, since the dataset will be split
        # equally.
        if self.drop_last and len(self.dataset) % self.num_replicas != 0:  # type: ignore[arg-type]
            # Split to nearest available length that is evenly divisible, so
            # each rank receives the same amount of data.
            self.num_samples = math.ceil(
                (len(self.dataset) - self.num_replicas) / self.num_replicas  # type: ignore[arg-type]
            )
        else:
            self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)  # type: ignore[arg-type]
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed

    def __iter__(self) -> Iterator[int]:
        # Bug fixes vs. the original: the annotation referenced an undefined
        # ``T_co`` (NameError at class-creation time), and the shuffle branch
        # never assigned ``indices`` (only leftover debug code), raising
        # NameError whenever shuffle=True.
        indices = list(range(len(self.dataset)))  # type: ignore[arg-type]
        if self.shuffle:
            # Deterministic shuffle based on epoch and seed, identical on
            # every process in the distributed group.
            random.Random(self.seed + self.epoch).shuffle(indices)

        if not self.drop_last:
            # Add extra samples to make the index list evenly divisible.
            padding_size = self.total_size - len(indices)
            if padding_size <= len(indices):
                indices += indices[:padding_size]
            else:
                indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
        else:
            # Remove tail of data to make it evenly divisible.
            indices = indices[:self.total_size]
        assert len(indices) == self.total_size

        # Subsample: every rank takes a strided slice of the shared ordering.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""
        Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
| 44.516779 | 105 | 0.636514 |
acebabc1aaf1d66dae9c79b3a9f7ad6e5ea3fbed | 1,515 | py | Python | inbm/dispatcher-agent/tests/unit/test_ota_downloader.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 5 | 2021-12-13T21:19:31.000Z | 2022-01-18T18:29:43.000Z | inbm/dispatcher-agent/tests/unit/test_ota_downloader.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 45 | 2021-12-30T17:21:09.000Z | 2022-03-29T22:47:32.000Z | inbm/dispatcher-agent/tests/unit/test_ota_downloader.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 4 | 2022-01-26T17:42:54.000Z | 2022-03-30T04:48:04.000Z | from unittest import TestCase
from unit.common.mock_resources import *
from dispatcher.dispatcher_exception import DispatcherException
from dispatcher.aota.aota_error import AotaError
from dispatcher.ota_downloader import AotaDownloader, SotaDownloader, FotaDownloader
from dispatcher.packagemanager.memory_repo import MemoryRepo
from mock import patch
ota_element = {'fetch': 'https://abc.tar'}
parsed_manifest = {'uri': 'https://abc.com', 'signature': 'asdf',
'hash_algorithm': '3',
'resource': ota_element,
'username': 'uname',
'password': 'pwd'}
class TestOtaDownloader(TestCase):
    """Smoke tests: each OTA-type downloader's download() must not raise."""

    def setUp(self):
        # Fresh mock dispatcher for every test case.
        self.mock_disp_obj = MockDispatcher.build_mock_dispatcher()

    def test_download_aota(self):
        # AOTA download should complete without raising.
        try:
            AotaDownloader(self.mock_disp_obj, parsed_manifest).download()
        except (DispatcherException, AotaError):
            self.fail("Raised expected when not expected.")

    def test_download_sota(self):
        # SOTA download should complete without raising.
        try:
            SotaDownloader(self.mock_disp_obj, parsed_manifest).download()
        except (DispatcherException, AotaError):
            self.fail("Raised expected when not expected.")

    @patch('dispatcher.ota_downloader.download')
    def test_download_fota(self, mock_download):
        # FOTA download should complete without raising; the underlying
        # network fetch is patched out.
        try:
            FotaDownloader(self.mock_disp_obj, parsed_manifest).download()
        except (DispatcherException, AotaError):
            self.fail("Raised expected when not expected.")
| 36.95122 | 84 | 0.687789 |
acebac258bdcefc56aa27b758538899834a691be | 11,726 | py | Python | ckanext-hdx_org_group/ckanext/hdx_org_group/actions/get.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckanext-hdx_org_group/ckanext/hdx_org_group/actions/get.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckanext-hdx_org_group/ckanext/hdx_org_group/actions/get.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | '''
Created on April 24, 2015
@author: alexandru-m-g
'''
import pylons.config as config
import ckan.logic as logic
import ckan.model as model
import ckan.common as common
import ckan.lib.dictization as d
from ckan.common import c
import ckan.lib.helpers as helpers
import ckanext.hdx_crisis.dao.location_data_access as location_data_access
import ckanext.hdx_org_group.dao.indicator_access as indicator_access
import ckanext.hdx_org_group.dao.widget_data_service as widget_data_service
import ckanext.hdx_org_group.helpers.organization_helper as org_helper
from ckanext.hdx_theme.helpers.caching import cached_make_rest_api_request as cached_make_rest_api_request
import shlex
import subprocess
import random
import logging
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.navl.dictization_functions
_validate = ckan.lib.navl.dictization_functions.validate
ValidationError = logic.ValidationError
log = logging.getLogger(__name__)
json = common.json
get_action = logic.get_action
_get_or_bust = logic.get_or_bust
NotFound = logic.NotFound
IndicatorAccess = indicator_access.IndicatorAccess
@logic.side_effect_free
def hdx_datasets_for_group(context, data_dict):
    '''
    Returns a paginated list of datasets for a group with 25 items per page.
    Options for sorting are: metadata_modified desc, title_case_insensitive desc, title_case_insensitive asc,
    views_recent desc, score desc ( only useful if query string is specified, should be combined
    with metadata_modified desc )

    :param id: the id of the group for which datasets are requested
    :type id: string
    :param page: page number starting from 1
    :type page: int
    :param sort: the field by which the datasets should be sorted. Defaults to 'metadata_modified desc'
    :type sort: string
    :param q: query string
    :type q: string
    :param type: 'all', 'indicators', 'datasets'. Defaults to 'all'
    :type q: string
    :return:
    '''
    # Keys consumed by this action itself; every other key in data_dict is
    # forwarded to Solr as a field:value filter.
    skipped_keys = ['q', 'id', 'sort', 'type', 'page']

    id = _get_or_bust(data_dict, "id")
    limit = 25
    sort_option = data_dict.get('sort', 'metadata_modified desc')
    page = int(data_dict.get('page', 1))

    new_data_dict = {'sort': sort_option,
                     'rows': limit,
                     'start': (page - 1) * limit,
                     }

    type = data_dict.get('type', None)
    if type == 'indicators':
        new_data_dict['ext_indicator'] = u'1'
    elif type == 'datasets':
        new_data_dict['ext_indicator'] = u'0'

    search_param_list = [
        key + ":" + value for key, value in data_dict.iteritems() if key not in skipped_keys]
    search_param_list.append(u'groups:{}'.format(id))

    # The list always contains at least the groups filter, so the original
    # ``if search_param_list != None`` guard was dead code (a freshly built
    # list is never None); build the fq string unconditionally.
    new_data_dict['fq'] = " ".join(search_param_list) + ' +dataset_type:dataset'

    if data_dict.get('q', None):
        new_data_dict['q'] = data_dict['q']

    query = get_action("package_search")(context, new_data_dict)

    return query
@logic.side_effect_free
def hdx_topline_num_for_group(context, data_dict):
    '''
    :param id: the id of the group for which top line numbers are requested
    :type id: string

    :return: a dict of top line numbers. Please note that depending on the selected group the source
        of the data ( either the datastore or CPS/indicators ) might be different. The data will have some fields
        that are specific to the source.
    '''
    id = _get_or_bust(data_dict, "id")
    grp_result = get_group(id)
    group_info = grp_result.get('group_info')
    custom_dict = grp_result.get('custom_dict')
    datastore_id = custom_dict.get('topline_resource', None)
    # 'false'/'0' request raw source format; anything else (including
    # absence) selects the common normalised format.
    common_format = data_dict.get('common_format', True) not in ['false', '0']  # type: bool

    # 'active' locations are served from the ReliefWeb crisis app; all other
    # locations use the standard (CPS) source.
    fetch_toplines = (__get_toplines_for_active_country
                      if group_info.get('activity_level') == 'active'
                      else __get_toplines_for_standard_country)
    return fetch_toplines(group_info, common_format)
def __get_toplines_for_active_country(group_info, common_format):
    '''
    Top-line figures for an 'active' location, sourced from the ReliefWeb
    crisis app.

    :param group_info:
    :type group_info: dict
    :param common_format: when True, normalise each item into the common
        top-line dict shape; otherwise return the raw source items.
    :type common_format: bool
    :return:
    :rtype: list
    '''
    top_line_data_list = widget_data_service.build_widget_data_access(group_info).get_dataset_results()
    if not common_format:
        return top_line_data_list

    top_line_items = []
    for item in top_line_data_list:
        # Non-numeric values become None rather than failing the request.
        try:
            numeric_value = float(item.get('value', ''))
        except (TypeError, ValueError):
            numeric_value = None
        top_line_items.append({
            'source_system': 'reliefweb crisis app',
            'code': item.get('indicatorTypeCode', ''),
            'title': item.get('indicatorTypeName', ''),
            'source_link': item.get('datasetLink', ''),
            'source': item.get('sourceName', ''),
            'value': numeric_value,
            'latest_date': item.get('time', ''),
            'units': item.get('units'),
        })
    return top_line_items
def __get_toplines_for_standard_country(group_info, common_format):
    '''
    Top-line figures for a standard location, sourced via the URL configured
    in 'hdx.locations.toplines_url'.

    :param group_info:
    :type group_info: dict
    :param common_format: when True, normalise each item into the common
        top-line dict shape; otherwise return the raw source items.
    :type common_format: bool
    :return:
    :rtype: list
    '''
    raw_top_line_items = widget_data_service.build_widget_data_access(group_info).get_dataset_results()
    if not common_format:
        return raw_top_line_items

    top_line_items = []
    for item in raw_top_line_items:
        code = item.get('indicatorTypeCode', '')
        title = item.get('indicatorTypeName', '')
        # Fall back to whichever of code/title is non-empty so both fields
        # are always populated.
        top_line_items.append({
            'source_system': 'cps',
            'code': code or title,
            'title': title or code,
            'source': item.get('sourceName', ''),
            'value': item.get('value', ''),
            'latest_date': item.get('time', ''),
            'units': item.get('unitName'),
        })
    return top_line_items
@logic.side_effect_free
def hdx_light_group_show(context, data_dict):
    '''
    Return a lightweight ( less resource intensive,faster but without datasets ) version of the group details

    :param id: the id of the group for which top line numbers are requested
    :type id: string
    '''
    id = _get_or_bust(data_dict, "id")
    group_dict = {}
    group = model.Group.get(id)
    if not group:
        raise NotFound
    # Deleted groups are only visible to sysadmins.
    if group.state == 'deleted' and (not c.userobj or not c.userobj.sysadmin):
        raise NotFound

    group_dict['id'] = group.id
    group_dict['name'] = group.name
    group_dict['image_url'] = group.image_url
    group_dict['display_name'] = group_dict['title'] = group.title
    group_dict['description'] = group.description
    group_dict['state'] = group.state
    group_dict['created'] = group.created
    group_dict['type'] = group.type

    result_list = []
    for name, extra in group._extras.iteritems():
        dictized = d.table_dictize(extra, context)
        if not extra.state == 'active':
            continue
        # Fix: the original also bound ``value = dictized["value"]`` here and
        # never used it; the dead local has been removed.
        result_list.append(dictized)
        # Keeping the above for backwards compatibility
        group_dict[name] = dictized["value"]
    group_dict['extras'] = sorted(result_list, key=lambda x: x["key"])
    return group_dict
def get_group(id):
    """Fetch a group's lightweight details plus its parsed 'customization'
    extra, returned as {'group_info': ..., 'custom_dict': ...}."""
    context = {'model': model, 'session': model.Session,
               'include_datasets': False,
               'for_view': True}
    group_info = get_action('hdx_light_group_show')(context, {'id': id})

    extras_dict = {item['key']: item['value'] for item in group_info.get('extras', {})}
    # 'customization' is stored as a JSON string; missing/empty maps to {}.
    json_string = extras_dict.get('customization', None)
    custom_dict = json.loads(json_string) if json_string else {}

    return {'group_info': group_info, 'custom_dict': custom_dict}
@logic.side_effect_free
def hdx_trigger_screencap(context, data_dict):
    """Trigger regeneration of an organization page screenshot.

    Only a sysadmin may force a reset via ``reset_thumbnails=true``;
    otherwise the context must carry ``reset=True``. Returns False when the
    caller is not allowed, or no screen-cap selector is configured.
    """
    cfg = context['cfg']
    file_path = context['file_path']

    # checking if user is sysadmin
    sysadmin = False
    if data_dict.get('reset_thumbnails', 'false') == 'true':
        try:
            logic.check_access('hdx_trigger_screencap', context, data_dict)
            sysadmin = True
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            return False

    if not sysadmin and not context.get('reset', False):
        return False
    if not cfg['screen_cap_asset_selector']:  # If there's no selector set just don't bother
        return False

    return org_helper.hdx_capturejs(config['ckan.site_url'] + helpers.url_for('organization_read', id=cfg['org_name']),
                                    file_path, cfg['screen_cap_asset_selector'])
@logic.side_effect_free
def hdx_get_locations_info_from_rw(context, data_dict):
    """Fetch (cached) location info from the ReliefWeb URL in ``rw_url``.

    Returns None when no URL is supplied or the request fails.
    """
    try:
        url = data_dict.get('rw_url')
        if url:
            return cached_make_rest_api_request(url)
        return None
    # Fix: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        log.error("RW file was not found or can not be accessed")
        return None
@logic.side_effect_free
def hdx_organization_follower_list(context, data_dict):
    '''Return the list of users that are following the given organization.

    :param id: the id or name of the organization
    :type id: string

    :rtype: list of dictionaries

    '''
    logic.check_access('hdx_organization_follower_list', context, data_dict)
    # Unlike the stock follower list, email addresses are kept in the output.
    context['keep_email'] = True
    follow_schema = ckan.logic.schema.default_follow_group_schema()
    follower_model = context['model'].UserFollowingGroup
    return _follower_list(context, data_dict, follow_schema, follower_model)
def _follower_list(context, data_dict, default_schema, FollowerClass):
    """Validate ``data_dict``, look up the followers of the identified
    object, and return them as dictized users."""
    schema = context.get('schema', default_schema)
    data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)

    # Map follower records to User objects, dropping followers whose user
    # row no longer exists.
    model = context['model']
    followers = FollowerClass.follower_list(data_dict.get('id'))
    users = [user
             for user in (model.User.get(f.follower_id) for f in followers)
             if user is not None]

    return _user_list_dictize(users, context)
def _user_list_dictize(obj_list, context,
                       sort_key=lambda x: x['name'], reverse=False):
    """Dictize a list of User objects, stripping credential fields, and
    return the result sorted by ``sort_key``.

    Note: unlike ckan's stock helper this intentionally keeps ``email``.
    """
    # Fix: removed the function-local
    # ``import ckan.lib.dictization.model_dictize as model_dictize`` which
    # redundantly shadowed the identical module-level import.
    result_list = []
    for obj in obj_list:
        user_dict = model_dictize.user_dictize(obj, context)
        # Never expose credentials / API secrets.
        user_dict.pop('reset_key', None)
        user_dict.pop('apikey', None)
        # user_dict.pop('email', None)
        result_list.append(user_dict)
    return sorted(result_list, key=sort_key, reverse=reverse)
| 33.695402 | 119 | 0.664933 |
acebac950069174b3b119d0bf7b1934278b1bed9 | 2,177 | py | Python | tests/components/homekit_controller/specific_devices/test_lg_tv.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | tests/components/homekit_controller/specific_devices/test_lg_tv.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | tests/components/homekit_controller/specific_devices/test_lg_tv.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Make sure that handling real world LG HomeKit characteristics isn't broken."""
from homeassistant.components.media_player.const import (
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_SELECT_SOURCE,
)
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_lg_tv(hass):
"""Test that a Koogeek LS1 can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "lg_tv.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Assert that the entity is correctly added to the entity registry
entry = entity_registry.async_get("media_player.lg_webos_tv_af80")
assert entry.unique_id == "homekit-999AAAAAA999-48"
helper = Helper(
hass, "media_player.lg_webos_tv_af80", pairing, accessories[0], config_entry
)
state = await helper.poll_and_get_state()
# Assert that the friendly name is detected correctly
assert state.attributes["friendly_name"] == "LG webOS TV AF80"
# Assert that all channels were found and that we know which is active.
assert state.attributes["source_list"] == [
"AirPlay",
"Live TV",
"HDMI 1",
"Sony",
"Apple",
"AV",
"HDMI 4",
]
assert state.attributes["source"] == "HDMI 4"
# Assert that all optional features the LS1 supports are detected
assert state.attributes["supported_features"] == (
SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_SELECT_SOURCE
)
# The LG TV doesn't (at least at this patch level) report its media state via
# CURRENT_MEDIA_STATE. Therefore "ok" is the best we can say.
assert state.state == "ok"
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.manufacturer == "LG Electronics"
assert device.name == "LG webOS TV AF80"
assert device.model == "OLED55B9PUA"
assert device.sw_version == "04.71.04"
assert device.via_device_id is None
| 33.492308 | 84 | 0.706017 |
acebac9a83b64fc4aa2bfd53dbd0da5202251c50 | 6,158 | py | Python | scenic/projects/vivit/configs/kinetics400/vivit_base_factorised_encoder.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | 1 | 2022-02-17T18:48:43.000Z | 2022-02-17T18:48:43.000Z | scenic/projects/vivit/configs/kinetics400/vivit_base_factorised_encoder.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | null | null | null | scenic/projects/vivit/configs/kinetics400/vivit_base_factorised_encoder.py | NielsRogge/scenic | 4418bf4c6954fffe61d9bafc802981baa9440e49 | [
"Apache-2.0"
] | null | null | null | r"""ViViT Base Factorised Encoder model.
"""
import ml_collections
# The size of the Kinetics dataset changes as videos are removed from YouTube.
# Set this appropriately.
KINETICS_400_TRAIN_SIZE = 214834
KINETICS_400_VAL_SIZE = 17637
KINETICS_400_TEST_SIZE = 34579


def get_config():
  """Returns the base ViViT-B Factorised-Encoder config for Kinetics-400.

  Bundles dataset, model, training, initialization and logging settings into a
  single `ml_collections.ConfigDict` consumed by the ViViT trainer.
  """
  config = ml_collections.ConfigDict()
  config.experiment_name = 'vivit_kinetics400_classification'
  # Dataset
  config.dataset_name = 'video_tfrecord_dataset'
  config.dataset_configs = ml_collections.ConfigDict()
  config.data_dtype_str = 'float32'
  # NOTE: a misspelled `config.datset_configs = ConfigDict()` line and a
  # duplicate `config.data_dtype_str = 'float32'` assignment were removed here;
  # both were dead.
  config.dataset_configs.base_dir = (
      '/path/to/dataset')
  config.dataset_configs.tables = {
      'train': 'train.tfrecord@1024',
      'validation': 'validation.tfrecord@1024',
      'test': 'test.tfrecord@1024'
  }
  config.dataset_configs.examples_per_subset = {
      'train': KINETICS_400_TRAIN_SIZE,
      'validation': KINETICS_400_VAL_SIZE,
      'test': KINETICS_400_TEST_SIZE
  }
  config.dataset_configs.num_classes = 400
  # This is going to sample 32 frames, sampled at a stride of 2 from the video.
  # Kinetics videos has 250 frames.
  config.dataset_configs.num_frames = 32
  config.dataset_configs.stride = 2
  config.dataset_configs.min_resize = 256
  config.dataset_configs.crop_size = 224
  config.dataset_configs.one_hot_labels = True
  config.dataset_configs.zero_centering = True
  # Multicrop evaluation settings:
  config.dataset_configs.do_multicrop_test = True  # Do during training.
  config.dataset_configs.log_test_epochs = 5
  # The effective batch size per host when testing is
  # num_test_clips * test_batch_size.
  config.dataset_configs.num_test_clips = 4
  config.dataset_configs.test_batch_size = 8  # Must equal num_local_devices.
  # To take three spatial crops when testing.
  config.dataset_configs.do_three_spatial_crops = True
  config.multicrop_clips_per_device = 2
  # Data augmentation.
  config.dataset_configs.augmentation_params = ml_collections.ConfigDict()
  config.dataset_configs.augmentation_params.do_jitter_scale = True
  config.dataset_configs.augmentation_params.scale_min_factor = 0.9
  config.dataset_configs.augmentation_params.scale_max_factor = 1.33
  config.dataset_configs.augmentation_params.prob_scale_jitter = 1.0
  config.dataset_configs.augmentation_params.do_color_augment = True
  config.dataset_configs.augmentation_params.prob_color_augment = 0.8
  config.dataset_configs.augmentation_params.prob_color_drop = 0.1
  config.dataset_configs.prefetch_to_device = 2
  # Model.
  config.model_name = 'vivit_classification'
  config.model = ml_collections.ConfigDict()
  config.model.hidden_size = 768
  config.model.attention_config = ml_collections.ConfigDict()
  config.model.attention_config.type = 'factorized_encoder'
  config.model.patches = ml_collections.ConfigDict()
  config.model.spatial_transformer = ml_collections.ConfigDict()
  config.model.spatial_transformer.num_heads = 12
  config.model.spatial_transformer.mlp_dim = 3072
  config.model.spatial_transformer.num_layers = 12
  config.model.temporal_transformer = ml_collections.ConfigDict()
  config.model.temporal_transformer.num_heads = 12
  config.model.temporal_transformer.mlp_dim = 3072
  config.model.temporal_transformer.num_layers = 4
  config.model.representation_size = None
  config.model.classifier = 'token'
  config.model.attention_dropout_rate = 0.
  config.model.dropout_rate = 0.
  config.model_dtype_str = 'float32'
  config.model.temporal_encoding_config = ml_collections.ConfigDict()
  config.model.temporal_encoding_config.method = '3d_conv'
  config.model.patches.size = [16, 16, 2]
  config.model.temporal_encoding_config.kernel_init_method = 'central_frame_initializer'
  # Applies when temporal_encoding_config.method='temporal_sampling'
  config.model.temporal_encoding_config.n_sampled_frames = 16  # Unused here.
  # Training
  config.trainer_name = 'vivit_trainer'
  config.optimizer = 'momentum'
  config.optimizer_configs = ml_collections.ConfigDict()
  config.l2_decay_factor = 0
  config.max_grad_norm = 1
  config.label_smoothing = None
  config.num_training_epochs = 30
  config.batch_size = 64
  config.rng_seed = 0
  # Use ImageNet-21k-initialized model.
  config.init_from = ml_collections.ConfigDict()
  # Download pretrained ImageNet checkpoints from here:
  # https://github.com/google-research/scenic/tree/main/scenic/projects/baselines (checkpoint_format = 'scenic') pylint: disable=line-too-long
  # https://github.com/google-research/vision_transformer (checkpoint_format = 'bigvision') pylint: disable=line-too-long
  config.init_from.checkpoint_path = 'path_to_checkpoint_of_vit_b_16'
  config.init_from.checkpoint_format = 'scenic'
  # (A redundant `config.init_from.model_config = None` that was immediately
  # overwritten below has been removed.)
  config.init_from.model_config = ml_collections.ConfigDict()
  config.init_from.model_config.model = ml_collections.ConfigDict()
  config.init_from.model_config.model.classifier = 'token'  # Specify if this is 'token' or 'gap'. pylint: disable=line-too-long
  config.init_from.restore_positional_embedding = True
  config.init_from.restore_input_embedding = True
  config.init_from.positional_embed_size_change = 'tile'
  # Learning rate
  steps_per_epoch = KINETICS_400_TRAIN_SIZE // config.batch_size
  total_steps = config.num_training_epochs * steps_per_epoch
  config.lr_configs = ml_collections.ConfigDict()
  config.lr_configs.learning_rate_schedule = 'compound'
  config.lr_configs.factors = 'constant * cosine_decay * linear_warmup'
  config.lr_configs.warmup_steps = int(2.5 * steps_per_epoch)
  config.lr_configs.steps_per_cycle = total_steps
  config.lr_configs.base_learning_rate = 5e-2
  # Logging
  config.write_summary = True  # write TB and/or XM summary
  config.write_xm_measurements = True  # write XM measurements
  config.checkpoint = True  # do checkpointing
  config.debug_train = False  # debug mode during training
  config.debug_eval = False  # debug mode during eval
  config.checkpoint_steps = 1000  # Checkpoint more frequently than a val epoch.
  return config
| 42.178082 | 143 | 0.789055 |
acebacf1d04f1cf0c93a87818db8ff7592ae8f03 | 1,030 | py | Python | Week 1/ex1.py | wahsandaruwan/python_ess_exercises | a43cffa1077d1d9af7ee2e81427416894b29b0b6 | [
"MIT"
] | null | null | null | Week 1/ex1.py | wahsandaruwan/python_ess_exercises | a43cffa1077d1d9af7ee2e81427416894b29b0b6 | [
"MIT"
] | null | null | null | Week 1/ex1.py | wahsandaruwan/python_ess_exercises | a43cffa1077d1d9af7ee2e81427416894b29b0b6 | [
"MIT"
] | null | null | null | # =======Print Method and Numbers in Python========
#---Print Method---
print("Hello World")
# ---Numbers---
# Python has an integer type called int
print("int")
print("---")
print(0)
print(1)
print(-3)
print(3546474574574745)
print("")
# Python has a real number type called float
print("float")
print("-----")
print(0.0)
print(7.35)
print(-43.2)
print("")
# Limited precision (floats lose some digits at the end)
print("Precision")
print("------")
print(4.54656575757567567567)
print(1.24252353634636464544546)
# Scientific/Exponential Notation
print("Scientific Notation")
print("-------------------")
print(5e32) # 5 times 10 to the 32nd power
print(999999999999999999999999999999999999999999999999999.9)
# Infinity: float literals too large to represent become inf
print("Infinity")
print("-------------")
print(1e500)
print(-1e500)
print("")
# Conversions between numeric types; note int() truncates toward zero
print("Coversions between Numeric Types")
print("-------------------------------------")
print(float(3))
print(float(999999999999999999999999999999999999999999))
print(int(3.0))
print(int(3.7))
print(int(-3.7))
acebad9f8805399afe19cff13be2359ec1bc2721 | 4,145 | py | Python | leetcode/1609.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | leetcode/1609.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | leetcode/1609.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | """
File: 1609.py
Title: Even Odd Tree
Difficulty: Medium
URL: https://leetcode.com/problems/even-odd-tree/
"""
import unittest
from collections import deque
from typing import List
class TreeNode:
    """Plain binary-tree node holding a value and two optional children."""
    def __init__(self,
                 val: int,
                 left: "TreeNode" = None,
                 right: "TreeNode" = None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def isEvenOddTree(self, root: TreeNode) -> bool:
        """Return True iff the tree satisfies the even-odd property.

        Zero-indexed even levels must hold odd values in strictly increasing
        left-to-right order; odd levels must hold even values in strictly
        decreasing order. Traversal is a level-by-level BFS.
        """
        frontier = deque([root])
        on_even_level = True  # level 0 is an "even" level
        while frontier:
            prev_val = None
            next_frontier = deque()
            for node in frontier:
                if on_even_level:
                    # Even level: odd values, strictly increasing.
                    if node.val % 2 == 0:
                        return False
                    if prev_val is not None and prev_val >= node.val:
                        return False
                else:
                    # Odd level: even values, strictly decreasing.
                    if node.val % 2 == 1:
                        return False
                    if prev_val is not None and prev_val <= node.val:
                        return False
                prev_val = node.val
                if node.left is not None:
                    next_frontier.append(node.left)
                if node.right is not None:
                    next_frontier.append(node.right)
            frontier = next_frontier
            on_even_level = not on_even_level
        return True
class SolutionTestCase(unittest.TestCase):
    """Unit tests for Solution.isEvenOddTree, mirroring the LeetCode examples."""
    def test_example1(self):
        # Input: valid even-odd tree (odd increasing / even decreasing levels).
        root = TreeNode(1,
                        TreeNode(10,
                                 TreeNode(3,
                                          TreeNode(12),
                                          TreeNode(8))),
                        TreeNode(4,
                                 TreeNode(7, TreeNode(6)),
                                 TreeNode(9, None, TreeNode(2))))
        # Output
        output = True
        solution = Solution()
        self.assertEqual(solution.isEvenOddTree(root), output)
    def test_example2(self):
        # Input: level 2 values [3, 7, 7] are not strictly increasing.
        root = TreeNode(5,
                        TreeNode(4, TreeNode(3), TreeNode(7)),
                        TreeNode(2, TreeNode(7)))
        # Output
        output = False
        solution = Solution()
        self.assertEqual(solution.isEvenOddTree(root), output)
    def test_example3(self):
        # Input: odd-indexed level holds odd values (9, 1) -- invalid.
        root = TreeNode(5,
                        TreeNode(9, TreeNode(3), TreeNode(5)),
                        TreeNode(1, TreeNode(7)))
        # Output
        output = False
        solution = Solution()
        self.assertEqual(solution.isEvenOddTree(root), output)
    def test_example4(self):
        # Input: single odd-valued root is trivially valid.
        root = TreeNode(1)
        # Output
        output = True
        solution = Solution()
        self.assertEqual(solution.isEvenOddTree(root), output)
    def test_example5(self):
        # Input: larger valid tree spanning five levels.
        root = TreeNode(11,
                        TreeNode(8,
                                 TreeNode(1,
                                          TreeNode(30, TreeNode(17)),
                                          TreeNode(20)),
                                 TreeNode(3,
                                          TreeNode(18),
                                          TreeNode(16))),
                        TreeNode(6,
                                 TreeNode(9,
                                          TreeNode(12),
                                          TreeNode(10)),
                                 TreeNode(11,
                                          TreeNode(4),
                                          TreeNode(2))))
        # Output
        output = True
        solution = Solution()
        self.assertEqual(solution.isEvenOddTree(root), output)
# Run the test suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 28.586207 | 69 | 0.429433 |
acebaf26ad17bacb3ff1ac268c491dc1e5ae026b | 4,031 | py | Python | core/models/metrics.py | afranck64/ultimatum | 313b840e6f0942c1c937aa8b3f2d26e5059c7a7f | [
"MIT"
] | null | null | null | core/models/metrics.py | afranck64/ultimatum | 313b840e6f0942c1c937aa8b3f2d26e5059c7a7f | [
"MIT"
] | 7 | 2020-01-28T22:41:06.000Z | 2021-04-29T22:23:23.000Z | core/models/metrics.py | afranck64/ultimatum | 313b840e6f0942c1c937aa8b3f2d26e5059c7a7f | [
"MIT"
] | null | null | null | import numpy as np
# Total amount at stake in one ultimatum-game round.
MAX_GAIN = 100


# A commented-out, array-based implementation of `loss` that duplicated the
# vectorized version below has been deleted (dead code).
@np.vectorize
def loss(min_offer, predicted):
    """Element-wise loss for the ultimatum game.

    If the offer is rejected (``predicted < min_offer``) the whole achievable
    gain ``MAX_GAIN - min_offer`` is lost; otherwise the loss is the amount
    over-offered, ``predicted - min_offer``.
    """
    return MAX_GAIN - min_offer if predicted < min_offer else predicted - min_offer
def loss_sum(min_offer, predicted):
    """Total loss accumulated over all (min_offer, predicted) pairs."""
    return np.sum(loss(min_offer.ravel(), predicted.ravel()))
def avg_loss(min_offer, predicted):
    """Mean per-pair loss for the ultimatum game."""
    per_pair_loss = loss(min_offer.ravel(), predicted.ravel())
    return np.mean(per_pair_loss)
def mse(min_offer, predicted):
    """Mean squared error, using the game loss as the error term."""
    errors = loss(min_offer.ravel(), predicted.ravel())
    return np.mean(np.square(errors))
def rejection_ratio(min_offer, predicted):
    """Fraction of proposals that fall below the responder's minimum."""
    acceptance_mask = min_offer <= predicted
    return 1 - np.mean(acceptance_mask)
def avg_win_loss(min_offer, predicted):
    """Average loss restricted to accepted proposals (0 when none accepted)."""
    offers = min_offer.ravel()
    proposals = predicted.ravel()
    accepted_mask = offers <= proposals
    if not accepted_mask.any():
        return 0
    return avg_loss(offers[accepted_mask], proposals[accepted_mask])
# A commented-out, array-based implementation of `gain` that duplicated the
# vectorized version below has been deleted (dead code).
@np.vectorize
def gain(min_offer, predicted):
    """Element-wise proposer gain.

    A rejected offer (``predicted < min_offer``) yields 0; an accepted one
    leaves the proposer with ``MAX_GAIN - predicted``.
    """
    return 0 if predicted < min_offer else MAX_GAIN - predicted
def avg_loss_ratio(min_offer, predicted):
    """
    Compute the avg loss ratio in relation to the maximal achievable gain.

    Pairs whose maximal gain is zero contribute a ratio of 1.
    """
    min_offer = min_offer.ravel()
    predicted = predicted.ravel()
    numerator, denominator = gain(min_offer, predicted), gain(min_offer, min_offer)
    zero_mask = denominator == 0
    denominator[zero_mask] = 1  # avoid division by zero
    tmp = numerator / denominator
    # BUGFIX: index with the pre-mutation mask. After the mutation above,
    # `denominator == 0` is all-False, so the original `tmp[denominator == 0]`
    # assignment was a no-op.
    tmp[zero_mask] = 1
    return 1 - np.mean(tmp)
def gain_mean(min_offer, predicted):
    """Mean proposer gain across all (min_offer, predicted) pairs."""
    return np.mean(gain(min_offer.ravel(), predicted.ravel()))
def avg_gain_ratio(min_offer, predicted):
    """Average ratio of realized gain to the maximal achievable gain.

    Pairs whose maximal gain is zero contribute a ratio of 0.
    """
    min_offer = min_offer.ravel()
    predicted = predicted.ravel()
    numerator, denominator = gain(min_offer, predicted), gain(min_offer, min_offer)
    zero_mask = denominator == 0
    denominator[zero_mask] = 1  # avoid division by zero
    tmp = numerator / denominator
    # BUGFIX: index with the pre-mutation mask. `denominator == 0` is all-False
    # after the mutation above, so the original assignment was a no-op.
    tmp[zero_mask] = 0
    return np.mean(tmp)
def cross_compute(min_offer, predicted, metric):
    """Average of `metric` with each offer broadcast against all responders.

    :param min_offer: responder's minimal acceptable offer
    :param predicted: proposed values
    :param metric: (func) computation metric
    """
    scores = [metric(min_offer, np.ones_like(min_offer) * offer)
              for offer in predicted]
    return sum(scores) / len(predicted)
def invariance(min_offer, predicted):
    """Score in (0, 1]; equals 1 when all predictions are identical."""
    spread = np.std(predicted) ** .5
    return 1 / (1 + spread)
__all__ = ['avg_loss', 'mse', 'rejection_ratio', 'avg_win_loss', 'avg_loss_ratio', 'loss_sum', 'MAX_GAIN', 'gain_mean', "cross_compute", "invariance"] | 31.992063 | 150 | 0.672786 |
acebafeef9d7ef7f1d636f67a1618899fd434838 | 421 | py | Python | elink/vendor/_input_diff_buffer.py | cfelton/parallella_elink | ccd2e7d49cca6cf10ed327aadad2d096e38121eb | [
"MIT"
] | null | null | null | elink/vendor/_input_diff_buffer.py | cfelton/parallella_elink | ccd2e7d49cca6cf10ed327aadad2d096e38121eb | [
"MIT"
] | null | null | null | elink/vendor/_input_diff_buffer.py | cfelton/parallella_elink | ccd2e7d49cca6cf10ed327aadad2d096e38121eb | [
"MIT"
] | null | null | null |
from myhdl import Signal, always_comb
def device_input_diff_buffer(in_p, in_n, sig):
if isinstance(sig, list):
num_channels = len(sig)
@always_comb
def rtl_buffer():
for ii in range(num_channels):
sig[ii].next = in_p[ii] and not in_n[ii]
else:
@always_comb
def rtl_buffer():
sig.next = in_p and not in_n
return rtl_buffer
| 20.047619 | 56 | 0.586698 |
acebb22140b2c0de2f7c5c757ad98f50ba043332 | 4,401 | py | Python | lib/python/treadmill/bootstrap/aliases.py | crazyrex/treadmill | 75be287a808a4cbdacab67b3f62a3cb3eb1eab67 | [
"Apache-2.0"
] | 1 | 2019-04-14T20:17:07.000Z | 2019-04-14T20:17:07.000Z | lib/python/treadmill/bootstrap/aliases.py | crazyrex/treadmill | 75be287a808a4cbdacab67b3f62a3cb3eb1eab67 | [
"Apache-2.0"
] | null | null | null | lib/python/treadmill/bootstrap/aliases.py | crazyrex/treadmill | 75be287a808a4cbdacab67b3f62a3cb3eb1eab67 | [
"Apache-2.0"
] | null | null | null | """Default aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import os
from treadmill import subproc
def _s6(exe):
    """Map an s6 tool alias to its path under the resolved s6 install dir.

    Underscores in the alias are translated to the dashes used by the actual
    s6 binaries. Returns None when the s6 root cannot be resolved.
    """
    s6_root = subproc.resolve('s6')
    if not s6_root:
        return None
    return os.path.join(s6_root, 'bin', exe.replace('_', '-'))
# Shortcuts that prepend the standard system binary directories to a name.
_BIN = functools.partial(os.path.join, '/bin')
_SBIN = functools.partial(os.path.join, '/sbin')
_USR_BIN = functools.partial(os.path.join, '/usr/bin')
_USR_SBIN = functools.partial(os.path.join, '/usr/sbin')
# Maps executable aliases to either a fixed path, a resolver callable (the
# `_BIN`/`_SBIN` partials or `_s6`), or None for "resolve elsewhere/default".
_LINUX_ALIASES = {
    # Standard Linux distribution tools, expected at fixed well-known paths
    # built via the directory partials above.
    'awk': _BIN,
    'basename': _BIN,
    'bc': _USR_BIN,
    'blkid': _SBIN,
    'brctl': _USR_SBIN,
    'cat': _BIN,
    'cgclear': _SBIN,
    'chmod': _BIN,
    'chown': _BIN,
    'chroot': _USR_SBIN,
    'conntrack': _SBIN,
    'cp': _BIN,
    'cut': _BIN,
    'date': _BIN,
    'dirname': _USR_BIN,
    'dmesg': _BIN,
    'docker': _USR_BIN,
    'dockerd': _USR_BIN,
    'docker_runtime': '/usr/libexec/docker/docker-runc-current',
    'dumpe2fs': _SBIN,
    'echo': _BIN,
    'expr': _USR_BIN,
    'find': _USR_BIN,
    'grep': _BIN,
    'gzip': _BIN,
    'head': _USR_BIN,
    'hostname': _BIN,
    'ifconfig': _SBIN,
    'ionice': _USR_BIN,
    'ip': _SBIN,
    'ipset': _SBIN,
    'iptables': _SBIN,
    'iptables_restore': '/sbin/iptables-restore',
    'kill': _BIN,
    'last': _USR_BIN,
    'ln': _BIN,
    'logrotate': _USR_SBIN,
    'losetup': _SBIN,
    'ls': _BIN,
    'lssubsys': _BIN,
    'lvm': _SBIN,
    'mkdir': _BIN,
    'mke2fs': _SBIN,
    'mkfifo': _USR_BIN,
    'mknod': _BIN,
    'mktemp': _BIN,
    'mount': _BIN,
    'mv': _BIN,
    'named': _USR_SBIN,
    'printf': _USR_BIN,
    'ps': _BIN,
    'pvremove': _SBIN,
    'pvs': _SBIN,
    'pwd': _BIN,
    'readlink': _BIN,
    'rm': _BIN,
    'rmdir': _BIN,
    'route': _SBIN,
    'sed': _BIN,
    'sleep': _BIN,
    'sshd': _BIN,
    'stat': _USR_BIN,
    'sysctl': _SBIN,
    'tail': _USR_BIN,
    'tar': _BIN,
    'touch': _BIN,
    'true': _BIN,
    'tune2fs': _SBIN,
    'umount': _BIN,
    'unshare': _USR_BIN,
    'vgchange': _SBIN,
    'vgremove': _SBIN,
    'watchdog': _USR_SBIN,
    'wc': _USR_BIN,
    # https://github.com/axboe/fio
    'fio': None,
    # s6 root dir.
    's6': None,
    # s6 utilities, resolved relative to the s6 root via the _s6 helper.
    'backtick': _s6,
    'cd': _s6,
    'define': _s6,
    'dnscache': _s6,
    'elglob': _s6,
    'emptyenv': _s6,
    'execlineb': _s6,
    'exit': _s6,
    'fdmove': _s6,
    'forbacktickx': _s6,
    'foreground': _s6,
    'forstdin': _s6,
    'heredoc': _s6,
    'if': _s6,
    'ifelse': _s6,
    'ifte': _s6,
    'importas': _s6,
    'loopwhilex': _s6,
    'openldap': _s6,
    'pipeline': _s6,
    'redirfd': _s6,
    's6_envdir': _s6,
    's6_envuidgid': _s6,
    's6_fghack': _s6,
    's6_ipcclient': _s6,
    's6_ipcserver': _s6,
    's6_ipcserver_access': _s6,
    's6_log': _s6,
    's6_setuidgid': _s6,
    's6_svc': _s6,
    's6_svok': _s6,
    's6_svscan': _s6,
    's6_svscanctl': _s6,
    's6_svwait': _s6,
    'withstdinas': _s6,
    'umask': _s6,
    # Treadmill-bind.
    'treadmill_bind_distro': None,
    'treadmill_bind_preload.so': None,
    # Treadmill spawn.
    # TODO: should be moved to treadmill spawn aliases.
    'treadmill_spawn_path': None,
    'treadmill_spawn': None,
    'treadmill_spawn_finish': None,
    'treadmill_spawn_run': None,
    # Treadmill PID1
    'pid1': None,
    # Kerberos tools, default to standard locations.
    'kinit': None,
    'klist': None,
    # Treadmill krb tools
    'kt_add': None,
    'kt_split': None,
    'tkt_recv': None,
    'tkt_send': None,
    # RRD tools.
    'rrdcached': None,
    'rrdtool': None,
    # Open LDAP binaries.
    # TODO: should be moved to OpenLDAP aliases.
    'slapadd': None,
    'slapd': None,
    # NOTE(review): purpose of this entry is unclear -- confirm it is still
    # needed before removing.
    'logstash-forwarder': None,
}
# Windows aliases: the winss supervision tools resolve elsewhere (None),
# while powershell has a fixed system path.
_WINDOWS_ALIASES = {
    'winss': None,
    'winss_log': None,
    'winss_svc': None,
    'winss_svok': None,
    'winss_svscan': None,
    'winss_svscanctl': None,
    'winss_svwait': None,
    'powershell': 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\'
                  'powershell.exe',
}
# Select the platform-appropriate alias table at import time.
if os.name == 'nt':
    ALIASES = _WINDOWS_ALIASES
else:
    ALIASES = _LINUX_ALIASES
| 21.468293 | 78 | 0.582822 |
acebb2297f3c11c5f6c56f5535d7561be23c4a6e | 63,842 | py | Python | tensorflow/python/debug/lib/session_debug_testlib.py | Cedo00/tensorflow | 623dd8361325d22d1487b6a2ec67c54065dd726b | [
"Apache-2.0"
] | 8 | 2017-07-27T14:39:56.000Z | 2018-10-21T00:02:36.000Z | tensorflow/python/debug/lib/session_debug_testlib.py | jestervise/tensorflow | 34c738cc6d3badcb22e3f72482536ada29bd0e65 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/debug/lib/session_debug_testlib.py | jestervise/tensorflow | 34c738cc6d3badcb22e3f72482536ada29bd0e65 | [
"Apache-2.0"
] | 38 | 2017-04-28T04:15:48.000Z | 2019-09-28T05:11:46.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class _RNNCellForTest(rnn_cell_impl._RNNCell):  # pylint: disable=protected-access
  """RNN cell for testing: scales the input by a learned scalar `w`,
  passing the state through unchanged."""
  def __init__(self, input_output_size, state_size):
    self._input_output_size = input_output_size
    self._state_size = state_size
    # Single trainable scalar weight applied to each input.
    self._w = variables.Variable(1.0, dtype=dtypes.float32, name="w")
  @property
  def output_size(self):
    return self._input_output_size
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, input_, state, scope=None):
    # Output is the input scaled by w; state is returned untouched.
    return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
  @classmethod
  def setUpClass(cls):
    # Expectations depend on device availability: with a GPU present, each run
    # produces one partition graph per device (CPU + GPU).
    if test.is_gpu_available():
      cls._expected_partition_graph_count = 2
      cls._expected_num_devices = 2
      cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
    else:
      cls._expected_partition_graph_count = 1
      cls._expected_num_devices = 1
      cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
  @classmethod
  def tearDownClass(cls):
    # No class-level cleanup required; kept as an override point for subclasses.
    pass
  def setUp(self):
    # Fresh temporary directory to receive debug dumps for each test.
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    # Reset the default graph so tests do not leak ops into each other.
    ops.reset_default_graph()
    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def _debug_urls(self, run_number=None):
    """Returns the list of debug URLs to watch; subclasses must override."""
    raise NotImplementedError(
        "_debug_urls() method is not implemented in the base test class.")
  def _debug_dump_dir(self, run_number=None):
    """Returns the dump directory for a run; subclasses must override."""
    raise NotImplementedError(
        "_debug_dump_dir() method is not implemented in the base test class.")
  def _generate_dump_from_simple_addition_graph(self):
    """Runs w = matmul(u, v) with watches on u/read and v/read.

    Returns a namedtuple bundling the initial values, ops, node names and the
    loaded DebugDumpDir for assertions in the calling test.
    """
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])
      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "u"
      v_name = "v"
      w_name = "w"
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)
      w = math_ops.matmul(u, v, name=w_name)
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
    simple_add_results = collections.namedtuple("SimpleAddResults", [
        "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
        "dump"
    ])
    return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                              w_name, dump)
  def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
    """The inserted __copy_* nodes should carry per-watch debug_ops_spec attrs."""
    with session.Session() as sess:
      u = variables.Variable(2.1, name="u")
      v = variables.Variable(20.0, name="v")
      w = math_ops.multiply(u, v, name="w")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Watch "u" with two debug ops, one of them gated for gRPC streaming.
      debug_utils.add_debug_tensor_watch(
          run_options,
          "u",
          0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
          debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertAllClose(42.0, r)
      # Locate the debugger-inserted copy nodes in the partition graphs.
      u_copy_node_def = None
      v_copy_node_def = None
      for partition_graph in run_metadata.partition_graphs:
        for node_def in partition_graph.node:
          if debug_data.is_copy_node(node_def.name):
            if node_def.name == "__copy_u_0":
              u_copy_node_def = node_def
            elif node_def.name == "__copy_v_0":
              v_copy_node_def = node_def
      # Each spec is "<debug_op>;<url>;<gated_grpc flag>".
      self.assertIsNotNone(u_copy_node_def)
      debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(2, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
      self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
                       debug_ops_spec[1].decode("utf-8"))
      self.assertIsNotNone(v_copy_node_def)
      debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(1, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
  def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
    """Dumps written to overlapping parent dirs should load with metadata intact."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertTrue(results.dump.loaded_partition_graphs())
    # Since global_step is not explicitly specified, it should take its default
    # value: -1.
    self.assertEqual(-1, results.dump.core_metadata.global_step)
    self.assertGreaterEqual(results.dump.core_metadata.session_run_count, 0)
    self.assertGreaterEqual(results.dump.core_metadata.executor_step_count, 0)
    self.assertEqual([], results.dump.core_metadata.input_names)
    self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
    self.assertEqual([], results.dump.core_metadata.target_nodes)
    # Verify the dumped tensor values for u and v.
    self.assertEqual(2, results.dump.size)
    self.assertAllClose([results.u_init_val],
                        results.dump.get_tensors("%s/read" % results.u_name, 0,
                                                 "DebugIdentity"))
    self.assertAllClose([results.v_init_val],
                        results.dump.get_tensors("%s/read" % results.v_name, 0,
                                                 "DebugIdentity"))
    # Timestamps are relative and must be non-negative; dump files non-empty.
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                          "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                          "DebugIdentity")[0], 0)
  def testGetOpTypeWorks(self):
    """node_op_type() maps node names to op types; unknown names raise."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertEqual(results.u.op.type,
                     results.dump.node_op_type(results.u_name))
    self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
    self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
    with self.assertRaisesRegexp(
        ValueError, "Node 'foo_bar' does not exist in partition graphs."):
      results.dump.node_op_type("foo_bar")
  def testDumpStringTensorsWorks(self):
    """String tensors should be dumpable and readable back from the dump dir."""
    with session.Session() as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")
      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)
      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.Variable(str1_init, name=str1_name)
      str2 = variables.Variable(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")
      str1.initializer.run()
      str2.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)
      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())
      self.assertEqual(2, dump.size)
      self.assertEqual([str1_init_val],
                       dump.get_tensors("%s/read" % str1_name, 0,
                                        "DebugIdentity"))
      self.assertEqual([str2_init_val],
                       dump.get_tensors("%s/read" % str2_name, 0,
                                        "DebugIdentity"))
      # Relative timestamps non-negative; dump files non-empty.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                    "DebugIdentity")[0], 0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpUninitializedVariable(self):
    """Watching uninitialized variables should dump None tensor values."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"
      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.Variable(s_init, name=s_name)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % s_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)
      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      # Verify that the variable is properly initialized by the run() call.
      # Before initialization completes, the watched values are None.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      self.assertIsNone(u_vals[0])
      self.assertEqual(1, len(s_vals))
      self.assertIsNone(s_vals[0])
      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def testDebugWhileLoopGeneratesMultipleDumps(self):
    """Each iteration of a watched while loop yields a separate dump."""
    with session.Session() as sess:
      num_iter = 10
      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]
      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.Variable(u_init, name=u_name)
      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]
      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.Variable(v_init, name=v_name)
      u.initializer.run()
      v.initializer.run()
      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
      def cond(i):
        # Iterate while the counter is below num_iter.
        return math_ops.less(i, num_iter)
      def body(i):
        # Add v to u, then increment the counter only after the u-update has
        # executed (enforced via the control dependency below).
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]
      loop = control_flow_ops.while_loop(
          cond, body, [i], parallel_iterations=10)
      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      self.assertEqual(num_iter, r)
      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))
      self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
      self.assertTrue(
          os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
      # Verify tensor values.
      self.assertAllClose([u_init_val],
                          dump.get_tensors(u_name, 0, "DebugIdentity"))
      self.assertAllClose([v_init_val],
                          dump.get_tensors("%s/read" % v_name, 0,
                                           "DebugIdentity"))
      # The loop counter passes through while/Identity once per iteration,
      # taking the values 0 through 9.
      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])
      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                            "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
      for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                           while_id_dump_sizes_bytes):
        # Timestamps must be non-decreasing; dump sizes are asserted equal
        # across iterations (same tensor every time).
        self.assertGreaterEqual(rel_time, prev_rel_time)
        self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
        prev_rel_time = rel_time
        prev_dump_size_bytes = dump_size_bytes
      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options,
sess.graph,
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
self.assertEqual(
16, sess.run(loop, options=run_options, run_metadata=run_metadata))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
  def testDebugTrainingDynamicRNNWorks(self):
    """A dynamic_rnn training step can be debugged with a node blacklist."""
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2
      # Time-major input of shape (time_steps, batch_size, input_size).
      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      # Minimize a toy quadratic loss so the graph contains gradient ops.
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_blacklists(
          run_options,
          sess.graph,
          node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)
      # Loading the dump must succeed; no value assertions are made here.
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options,
sess.graph,
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
self.assertEqual(
21, sess.run(cond, options=run_options, run_metadata=run_metadata))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
  def testFindNodesWithBadTensorValues(self):
    """DebugDumpDir.find() locates tensors containing nan or inf values."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.Variable(v_init, name=v_name)
      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)
      # Expected output: [inf, 1.3333] (division by the zero in w)
      x = math_ops.div(u, w, name=x_name)
      # Expected output: [nan, 4.0] (0 * inf)
      y = math_ops.multiply(w, x, name=y_name)
      z = math_ops.multiply(y, y, name=z_name)
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      def has_bad_value(_, tensor):
        # Predicate: the tensor contains at least one nan or inf element.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)
      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(3, len(bad_data))
      self.assertEqual(x_name, bad_data[0].node_name)
      self.assertEqual(y_name, bad_data[1].node_name)
      self.assertEqual(z_name, bad_data[2].node_name)
      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)
      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session() as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
  def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
    """node_inputs(), node_recipients() and transitive_inputs() are correct."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())
    u_read_name = u_name + "/read"
    # Test the inputs lookup of the DebugDumpDir object.
    self.assertEqual([], dump.node_inputs(u_name))
    self.assertEqual([u_name], dump.node_inputs(u_read_name))
    # v = u + u and w = v + v, so each consumes its input twice.
    self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
    self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
    # None of the nodes have control inputs.
    self.assertEqual([], dump.node_inputs(u_name, is_control=True))
    self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
    self.assertEqual([], dump.node_inputs(v_name, is_control=True))
    self.assertEqual([], dump.node_inputs(w_name, is_control=True))
    # Test the outputs recipient lookup of the DebugDumpDir object.
    self.assertTrue(u_read_name in dump.node_recipients(u_name))
    self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
    self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
    self.assertEqual([], dump.node_recipients(u_name, is_control=True))
    self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
    self.assertEqual([], dump.node_recipients(v_name, is_control=True))
    self.assertEqual([], dump.node_recipients(w_name, is_control=True))
    # Test errors raised on invalid node names.
    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_inputs(u_name + "foo")
    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_recipients(u_name + "foo")
    # Test transitive_inputs(): each node's full upstream closure.
    self.assertEqual([], dump.transitive_inputs(u_name))
    self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
    self.assertEqual(
        set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
    self.assertEqual(
        set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
  def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
    """Dump validation flags causality violations in dump-file timestamps."""
    with session.Session() as sess:
      u_name = "testDumpCausalityCheck/u"
      v_name = "testDumpCausalityCheck/v"
      w_name = "testDumpCausalityCheck/w"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)
      u.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      # First, loading the original dump without supplying the
      # partition_graphs should not cause a LookupError, validation occurs
      # only with partition_graphs loaded.
      debug_data.DebugDumpDir(self._dump_root)
      # Now, loading the original dump with partition graphs supplied should
      # succeed. The validation should pass quietly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Get the dump file names and compute their timestamps.
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
      v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
      w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
      # Dump file names end with "_<timestamp>"; parse the timestamps out.
      v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
      w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
      # Swap and slightly shift the time stamps of the last two dumped tensors,
      # to simulate "causality violation", which can happen if the dump
      # directory contains incomplete data and/or mixes data from different
      # Session.run() calls.
      v_file_path_1 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
          v_timestamp - 1)
      os.rename(v_file_path, v_file_path_1)
      os.rename(w_file_path, w_file_path_1)
      # Load the dump directory again. Now a ValueError is expected to be
      # raised due to the timestamp swap.
      with self.assertRaisesRegexp(ValueError, "Causality violated"):
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Loading the dump directory with kwarg "validate" set explicitly to
      # False should get rid of the error.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=False)
      # Next, set the two times stamps to be the same, which should be fine.
      v_file_path_2 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_2 = w_file_path[:w_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      os.rename(v_file_path_1, v_file_path_2)
      os.rename(w_file_path_1, w_file_path_2)
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching only one of a node's output slots passes dump validation."""
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"
      x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
      sess.run(x.initializer)
      # unique_with_counts gives node u multiple output slots.
      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)
      # Explicit validate=True must not raise despite the partially-watched
      # multi-output node.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)
      # Slot 0 of u holds the unique values of x: [1, 3, 7].
      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="gdo/v")
      w = math_ops.multiply(u, v, name="gdo/w")
      # gdo stands for GradientDescentOptimizer.
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(
              w, name="gdo/train")
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))
      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())
      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))
      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())
      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variables.Variable(x_init, name="unconnected/x")
      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
      y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
      x.initializer.run()
      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)
      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      result = sess.run(y, options=run_options, run_metadata=run_metadata)
      # unique([2, 2, 3, 5, 5]) -> [2, 3, 5]; adding [0, 1, 2] gives [2, 4, 7].
      self.assertAllClose([2, 4, 7], result)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
      # Assert that the unconnected slot (slot 1) is dumped properly.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      # Slot 1 holds the index of each x element within the unique values.
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
  def testSuccessiveDebuggingRunsIncreasesCounters(self):
    """Test repeated Session.run() calls with debugger increments counters."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
      # NOTE(review): x and y carry the "mismatch/" prefix while ph uses
      # "successive/" — presumably copied from another test; harmless here
      # because no assertion below depends on these node names.
      x = array_ops.transpose(ph, name="mismatch/x")
      y = array_ops.squeeze(ph, name="mismatch/y")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=1)
      sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
      dump1 = debug_data.DebugDumpDir(self._dump_root)
      self.assertEqual(1, dump1.core_metadata.global_step)
      self.assertGreaterEqual(dump1.core_metadata.session_run_count, 0)
      self.assertEqual(0, dump1.core_metadata.executor_step_count)
      self.assertEqual([ph.name], dump1.core_metadata.input_names)
      self.assertEqual([x.name], dump1.core_metadata.output_names)
      self.assertEqual([], dump1.core_metadata.target_nodes)
      # Clear the dump directory between runs so each DebugDumpDir below
      # sees only the latest run's data.
      shutil.rmtree(self._dump_root)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=2)
      # Calling run() with the same feed, same output and same debug watch
      # options should increment both session_run_count and
      # executor_step_count.
      sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
      dump2 = debug_data.DebugDumpDir(self._dump_root)
      self.assertEqual(2, dump2.core_metadata.global_step)
      self.assertEqual(dump1.core_metadata.session_run_count + 1,
                       dump2.core_metadata.session_run_count)
      self.assertEqual(dump1.core_metadata.executor_step_count + 1,
                       dump2.core_metadata.executor_step_count)
      self.assertEqual([ph.name], dump2.core_metadata.input_names)
      self.assertEqual([x.name], dump2.core_metadata.output_names)
      self.assertEqual([], dump2.core_metadata.target_nodes)
      shutil.rmtree(self._dump_root)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
      # Calling run() with a different output should increment
      # session_run_count, but not executor_step_count.
      sess.run(y, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
      dump3 = debug_data.DebugDumpDir(self._dump_root)
      self.assertEqual(3, dump3.core_metadata.global_step)
      self.assertEqual(dump2.core_metadata.session_run_count + 1,
                       dump3.core_metadata.session_run_count)
      self.assertEqual(0, dump3.core_metadata.executor_step_count)
      self.assertEqual([ph.name], dump3.core_metadata.input_names)
      self.assertEqual([y.name], dump3.core_metadata.output_names)
      self.assertEqual([], dump3.core_metadata.target_nodes)
  def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      m = constant_op.constant(
          np.array(
              [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
      y = math_ops.matmul(m, x, name="mismatch/y")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      # Feeding a 2x1 value makes x a 1x2 tensor; matmul(1x2, 1x2) is
      # shape-incompatible and fails at runtime.
      with self.assertRaises(errors.OpError):
        sess.run(y,
                 options=run_options,
                 feed_dict={ph: np.array([[-3.0], [0.0]])})
      dump = debug_data.DebugDumpDir(self._dump_root)
      # Core metadata should still be recorded despite the failed run.
      self.assertGreaterEqual(dump.core_metadata.session_run_count, 0)
      self.assertGreaterEqual(dump.core_metadata.executor_step_count, 0)
      self.assertEqual([ph.name], dump.core_metadata.input_names)
      self.assertEqual([y.name], dump.core_metadata.output_names)
      self.assertEqual([], dump.core_metadata.target_nodes)
      # Despite the fact that the run() call errored out and partition_graphs
      # are not available via run_metadata, the partition graphs should still
      # have been loaded from the dump directory.
      self.assertTrue(dump.loaded_partition_graphs())
      m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
      self.assertEqual(1, len(m_dumps))
      self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
      x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
      self.assertEqual(1, len(x_dumps))
      self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
  def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor reports correct stats."""
    with session.Session() as sess:
      # 18 elements: 4 nans, 2 -infs, 2 negatives, 3 zeros, 2 positives and
      # 5 +infs, exercising every category the summary op counts.
      a = variables.Variable(
          [
              np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
              -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
          ],
          dtype=np.float32,
          name="numeric_summary/a")
      b = variables.Variable(
          [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
      c = math_ops.add(a, b, name="numeric_summary/c")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      sess.run(c, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())
      # Expected summary (layout presumably [initialized, count, nan, -inf,
      # negative, zero, positive, +inf, min, max, mean, variance] — see the
      # DebugNumericSummary op docs): the finite values {0, 0, 0, -1, -3, 3, 7}
      # give min -3, max 7, mean 6/7 ~= 0.857 and variance ~= 8.98.
      self.assertAllClose([[
          1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
          8.97959184
      ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
  def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an uninitialized tensor reports empty stats."""
    with session.Session() as sess:
      a = variables.Variable(
          [42], dtype=np.float32, name="numeric_summary_uninit/a")
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      # The watch on a fires while a is still uninitialized: only the
      # initializer itself is run here.
      sess.run(a.initializer, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())
      # DebugNumericSummary output should reflect the uninitialized state of
      # the watched tensor.
      numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                         "DebugNumericSummary")[0]
      self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                          numeric_summary[0:8])
      # With no elements, min is +inf, max is -inf, and the mean and variance
      # fields are nan — as asserted below.
      self.assertTrue(np.isinf(numeric_summary[8]))
      self.assertGreater(numeric_summary[8], 0.0)
      self.assertTrue(np.isinf(numeric_summary[9]))
      self.assertLess(numeric_summary[9], 0.0)
      self.assertTrue(np.isnan(numeric_summary[10]))
      self.assertTrue(np.isnan(numeric_summary[11]))
  def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
    """tolerate_debug_op_creation_failures skips un-debuggable tensors."""
    with session.Session() as sess:
      a = variables.Variable("1", name="a")
      b = variables.Variable("3", name="b")
      c = variables.Variable("2", name="c")
      d = math_ops.add(a, b, name="d")
      e = math_ops.add(d, c, name="e")
      n = parsing_ops.string_to_number(e, name="n")
      m = math_ops.add(n, n, name="m")
      sess.run(variables.global_variables_initializer())
      # Using DebugNumericSummary on sess.run(m) with the default
      # tolerate_debug_op_creation_failures=False should error out due to the
      # presence of string-dtype Tensors in the graph.
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(m, options=run_options, run_metadata=run_metadata)
      # Using tolerate_debug_op_creation_failures=True should get rid of the
      # error.
      new_run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          new_run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls(),
          tolerate_debug_op_creation_failures=True)
      # "1"+"3"+"2" concatenates to "132", which parses to 132; m = 2 * 132.
      self.assertEqual(264,
                       sess.run(
                           m,
                           options=new_run_options,
                           run_metadata=run_metadata))
      # The numeric-dtype Tensors in the graph should have been dumped
      # properly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
      self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
  def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
    """Invalid attribute keys in the DebugNumericSummary spec raise errors.

    Each sub-case watches the graph with a DebugNumericSummary spec that
    contains one or more unknown attribute keys (e.g. foo, bar) and asserts
    that sess.run() fails with a FailedPreconditionError naming exactly the
    invalid keys; valid keys such as mute_if_healthy must not be reported.
    """
    with session.Session() as sess:
      a = variables.Variable(10.0, name="a")
      b = variables.Variable(0.0, name="b")
      c = variables.Variable(0.0, name="c")
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Case 1: a single invalid attribute key ("foo").
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_a:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      # Case 2: two invalid attribute keys ("foo" and "bar").
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"2 attribute key\(s\) were not valid for debug node "
          r"__dbg_a:0_0_DebugNumericSummary:"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      # Case 3: one invalid key mixed with a valid one (mute_if_healthy);
      # only "foo" must be reported.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_a:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
  def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
    """mute_if_healthy=true dumps only tensors with inf/nan values.

    x = a / b divides by zero and y multiplies the result, so x and y are
    "unhealthy" and must be dumped (dump.size == 2); a second run with the
    default mute_if_healthy (false) must dump everything (dump.size == 8).
    """
    with session.Session() as sess:
      a = variables.Variable(10.0, name="a")
      b = variables.Variable(0.0, name="b")
      c = variables.Variable(0.0, name="c")
      x = math_ops.divide(a, b, name="x")  # 10.0 / 0.0: produces inf.
      y = math_ops.multiply(x, c, name="y")  # inf * 0.0: produces nan.
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
          debug_urls=self._debug_urls())
      sess.run(y, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs,
          validate=False)
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      # Only the two unhealthy tensors (x and y) are dumped.
      self.assertEqual(2, dump.size)
      self.assertAllClose(
          [[1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
            np.nan]],
          dump.get_tensors("x", 0, "DebugNumericSummary"))
      self.assertAllClose(
          [[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
            np.nan]],
          dump.get_tensors("y", 0, "DebugNumericSummary"))
      # Another run with the default mute_if_healthy (false) value should
      # dump all the tensors.
      shutil.rmtree(self._dump_root)
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary()"],
          debug_urls=self._debug_urls())
      sess.run(y, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertEqual(8, dump.size)
  def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
    """mute_if_healthy combined with a custom upper_bound dumps violators only.

    All tensors are finite, but with upper_bound=11.0 only x = [20.0, 12.0]
    (both elements above the bound) counts as unhealthy, so exactly one
    tensor is dumped.
    """
    with session.Session() as sess:
      a = variables.Variable([10.0, 10.0], name="a")
      b = variables.Variable([10.0, 2.0], name="b")
      x = math_ops.add(a, b, name="x")  # [20.0, 12.0]
      y = math_ops.divide(x, b, name="y")  # [2.0, 6.0]
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=[
              "DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
          debug_urls=self._debug_urls())
      sess.run(y, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs,
          validate=False)
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      self.assertEqual(1, dump.size)
      self.assertAllClose(
          [[1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0]],
          dump.get_tensors("x", 0, "DebugNumericSummary"))
  # NOTE(review): the method name contains a typo ("Noto" -> "Not"); it is
  # kept as-is because unittest discovers tests by name.
  def testDebugQueueOpsDoesNotoErrorOut(self):
    """Watching a graph that contains queue ops must not error out.

    The queue handle itself has no numeric value (its DebugIdentity dump is
    None), while the enqueued component tensor is dumped normally.
    """
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_urls=self._debug_urls())
      sess.run(q_init, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())
      # The queue-handle dump carries no tensor value.
      self.assertIsNone(dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0])
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
  def testLookUpNodePythonTracebackWorks(self):
    """node_traceback() works after set_python_graph() and rejects bad input.

    Checks three states: lookup before the Python graph is attached raises
    LookupError; lookup of a nonexistent node raises KeyError; lookup by
    node name or by tensor name ("node:0") returns a list of trace tuples.
    """
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="traceback/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="traceback/v")
      w = math_ops.multiply(u, v, name="traceback/w")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls())
      sess.run(w, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Prior to setting the Python graph, attempts to do traceback lookup
      # should lead to exceptions.
      with self.assertRaisesRegexp(
          LookupError, "Python graph is not available for traceback lookup"):
        dump.node_traceback("traceback/w")
      dump.set_python_graph(sess.graph)
      # After setting the Python graph, attempts to look up nonexistent nodes
      # should lead to exceptions.
      with self.assertRaisesRegexp(KeyError,
                                   r"Cannot find node \"foo\" in Python graph"):
        dump.node_traceback("foo")
      # Lookup should work with node name input.
      traceback = dump.node_traceback("traceback/w")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
      # Lookup should also work with tensor name input.
      traceback = dump.node_traceback("traceback/w:0")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
  """Test for debugging concurrent Session.run() calls."""

  def _get_concurrent_debug_urls(self):
    """Abstract method to generate debug URLs for concurrent debugged runs."""
    raise NotImplementedError(
        "_get_concurrent_debug_urls is not implemented in the base test class")

  def testDebugConcurrentVariableUpdates(self):
    """Concurrent debugged runs produce consistent dumps and core metadata.

    Each of self._num_concurrent_runs threads performs 100 debugged
    assign_add() calls on the same Variable with its own debug URL; the
    final variable value, the per-thread dumps and the per-thread core
    metadata counters are then checked for consistency.
    """
    if test.is_gpu_available():
      self.skipTest("No testing concurrent runs on a single GPU.")

    with session.Session() as sess:
      v = variables.Variable(30.0, name="v")
      constants = []
      for i in xrange(self._num_concurrent_runs):
        constants.append(constant_op.constant(1.0, name="c%d" % i))
      # One locked assign_add op per concurrent runner.
      incs = [
          state_ops.assign_add(
              v, c, use_locking=True, name=("inc%d" % i))
          for (i, c) in enumerate(constants)
      ]
      sess.run(v.initializer)

      concurrent_debug_urls = self._get_concurrent_debug_urls()

      def inc_job(index):
        # Each thread watches the graph with its own debug URL and runs its
        # increment op 100 times.
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
        for _ in xrange(100):
          sess.run(incs[index], options=run_options)

      inc_threads = []
      for index in xrange(self._num_concurrent_runs):
        inc_thread = threading.Thread(target=functools.partial(inc_job, index))
        inc_thread.start()
        inc_threads.append(inc_thread)
      for inc_thread in inc_threads:
        inc_thread.join()

      # Every increment adds 1.0; all threads together add runs * 100.
      self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
                          sess.run(v))

      all_session_run_counts = []
      for index in xrange(self._num_concurrent_runs):
        dump = debug_data.DebugDumpDir(self._dump_roots[index])
        self.assertTrue(dump.loaded_partition_graphs())

        # Each thread must have dumped v exactly once per run.
        v_data = dump.get_tensors("v", 0, "DebugIdentity")
        self.assertEqual(100, len(v_data))

        # Examine all the core metadata files
        core_metadata_files = glob.glob(
            os.path.join(self._dump_roots[index], "_tfdbg_core*"))

        timestamps = []
        session_run_counts = []
        executor_step_counts = []
        for core_metadata_file in core_metadata_files:
          with open(core_metadata_file, "rb") as f:
            event = event_pb2.Event()
            event.ParseFromString(f.read())
            core_metadata = (
                debug_data.extract_core_metadata_from_event_proto(event))
            timestamps.append(event.wall_time)
            session_run_counts.append(core_metadata.session_run_count)
            executor_step_counts.append(core_metadata.executor_step_count)

        all_session_run_counts.extend(session_run_counts)

        # Assert that executor_step_count increases by one at a time.
        executor_step_counts = zip(timestamps, executor_step_counts)
        executor_step_counts = sorted(executor_step_counts, key=lambda x: x[0])
        for i in xrange(len(executor_step_counts) - 1):
          # assertEqual replaces the deprecated assertEquals alias, matching
          # the assertion style used elsewhere in this file.
          self.assertEqual(executor_step_counts[i][1] + 1,
                           executor_step_counts[i + 1][1])

        # Assert that session_run_count increase monotonically.
        session_run_counts = zip(timestamps, session_run_counts)
        session_run_counts = sorted(session_run_counts, key=lambda x: x[0])
        for i in xrange(len(session_run_counts) - 1):
          self.assertGreater(session_run_counts[i + 1][1],
                             session_run_counts[i][1])

      # Assert that the session_run_counts from the concurrent run() calls are
      # all unique.
      self.assertEqual(len(all_session_run_counts),
                       len(set(all_session_run_counts)))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| 40.611959 | 82 | 0.674509 |
acebb47099d5e48db109ed45e352ec3d9370e05a | 748 | py | Python | Python3/no47_Permutations_II.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | Python3/no47_Permutations_II.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | Python3/no47_Permutations_II.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | class Solution(object):
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if nums == []:
return []
result = []
self.helper(nums, [], result)
return result
def helper(self, nums, cur, result):
if nums == []:
result.append(cur[:])
else:
seen = set()
for i in range(len(nums)):
if nums[i] not in seen: # skip duplicated value
seen.add(nums[i])
newnums = nums[:]
newnums.pop(i)
cur.append(nums[i])
self.helper(newnums, cur, result)
cur.pop()
| 27.703704 | 64 | 0.421123 |
acebb5970dc50d520118aac5cc16c528c716571e | 3,354 | py | Python | music_publisher/migrations/0009_auto_20180912_1217.py | Huanghibo/django-music-publisher | 298fe497670c02951d617aa6b6a6e03995fa6562 | [
"MIT"
] | 1 | 2021-02-28T07:08:13.000Z | 2021-02-28T07:08:13.000Z | music_publisher/migrations/0009_auto_20180912_1217.py | Huanghibo/django-music-publisher | 298fe497670c02951d617aa6b6a6e03995fa6562 | [
"MIT"
] | null | null | null | music_publisher/migrations/0009_auto_20180912_1217.py | Huanghibo/django-music-publisher | 298fe497670c02951d617aa6b6a6e03995fa6562 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-09-12 12:17
import django.core.validators
from django.db import migrations, models
import music_publisher.models
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 2.1, 2018-09-12).  It adds
    # albumcd.album_label, tightens several CharField/DecimalField
    # definitions (society choice lists, publisher-fee validators) and makes
    # (album_title, album_label) unique together.  Do not edit by hand
    # except to keep it consistent with the model state at this point in the
    # migration history.
    dependencies = [
        ('music_publisher', '0008_auto_20180911_1055'),
    ]
    operations = [
        migrations.AddField(
            model_name='albumcd',
            name='album_label',
            field=models.CharField(blank=True, default='FOO BAR MUSIC', max_length=60, validators=[music_publisher.models.CWRFieldValidator('first_album_label')]),
        ),
        migrations.AlterField(
            model_name='albumcd',
            name='album_title',
            field=models.CharField(blank=True, default='', max_length=60, unique=True, validators=[music_publisher.models.CWRFieldValidator('first_album_title')]),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='workacknowledgement',
            name='society_code',
            field=models.CharField(choices=[('055', 'SABAM, Belgium'), ('101', 'SOCAN, Canada'), ('088', 'CMRRA, Canada'), ('040', 'KODA, Denmark'), ('089', 'TEOSTO, Finland'), ('058', 'SACEM, France'), ('035', 'GEMA, Germany'), ('074', 'SIAE, Italy'), ('023', 'BUMA, Netherlands'), ('078', 'STEMRA, Netherlands'), ('090', 'TONO, Norway'), ('079', 'STIM, Sweden'), ('052', 'PRS, United Kingdom'), ('044', 'MCPS, United Kingdom'), ('010', 'ASCAP, United States'), ('021', 'BMI, United States'), ('071', 'SESAC Inc., United States'), ('034', 'HFA, United States')], max_length=3, verbose_name='Society'),
        ),
        migrations.AlterField(
            model_name='writer',
            name='pr_society',
            field=models.CharField(blank=True, choices=[('055', 'SABAM, Belgium'), ('101', 'SOCAN, Canada'), ('088', 'CMRRA, Canada'), ('040', 'KODA, Denmark'), ('089', 'TEOSTO, Finland'), ('058', 'SACEM, France'), ('035', 'GEMA, Germany'), ('074', 'SIAE, Italy'), ('023', 'BUMA, Netherlands'), ('078', 'STEMRA, Netherlands'), ('090', 'TONO, Norway'), ('079', 'STIM, Sweden'), ('052', 'PRS, United Kingdom'), ('044', 'MCPS, United Kingdom'), ('010', 'ASCAP, United States'), ('021', 'BMI, United States'), ('071', 'SESAC Inc., United States'), ('034', 'HFA, United States')], help_text='Required for a controlled writer', max_length=3, null=True, validators=[music_publisher.models.CWRFieldValidator('writer_pr_society')], verbose_name='Performing Rights Society'),
        ),
        migrations.AlterField(
            model_name='writer',
            name='publisher_fee',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage of royalties kept by the publisher', max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
        ),
        migrations.AlterField(
            model_name='writerinwork',
            name='publisher_fee',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage of royalties kept by the publisher', max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
        ),
        migrations.AlterUniqueTogether(
            name='albumcd',
            unique_together={('album_title', 'album_label')},
        ),
    ]
| 65.764706 | 765 | 0.635063 |
acebb6cd4018ee742845d92ae024ecb094d72b2c | 231 | py | Python | artefacts/py2/tests/trace_mockup_6x.py | leiflundgren/mx-trace-print | 6523b63ef7d196fb761ee17bf6576174c0d9ec40 | [
"Apache-2.0"
] | null | null | null | artefacts/py2/tests/trace_mockup_6x.py | leiflundgren/mx-trace-print | 6523b63ef7d196fb761ee17bf6576174c0d9ec40 | [
"Apache-2.0"
] | 2 | 2019-01-21T12:54:04.000Z | 2019-01-28T16:51:55.000Z | artefacts/py2/tests/trace_mockup_6x.py | leiflundgren/mx-trace-print | 6523b63ef7d196fb761ee17bf6576174c0d9ec40 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
###
### Mockup that simulates trace on MX-One 6.x
from __future__ import absolute_import
import trace_mockup
def main_mx6x():
    """Create a TraceMockup configured for MX-One major version 6."""
    return trace_mockup.TraceMockup(6)
if __name__ == u"__main__":
main_mx6x() | 19.25 | 45 | 0.731602 |
acebb82034375a3e168bb8d6d9287065948e9dde | 16,351 | py | Python | skl2onnx/operator_converters/support_vector_machines.py | snapADDY/sklearn-onnx | 8e5d3c924af3a922c3c36c08b8cf7af5967e81b6 | [
"Apache-2.0"
] | 323 | 2018-12-18T20:23:19.000Z | 2022-03-25T09:47:31.000Z | skl2onnx/operator_converters/support_vector_machines.py | snapADDY/sklearn-onnx | 8e5d3c924af3a922c3c36c08b8cf7af5967e81b6 | [
"Apache-2.0"
] | 408 | 2019-01-02T12:16:10.000Z | 2022-03-21T14:01:28.000Z | skl2onnx/operator_converters/support_vector_machines.py | snapADDY/sklearn-onnx | 8e5d3c924af3a922c3c36c08b8cf7af5967e81b6 | [
"Apache-2.0"
] | 70 | 2018-12-20T19:36:07.000Z | 2022-03-14T06:41:36.000Z | # SPDX-License-Identifier: Apache-2.0
import numbers
import numpy as np
from scipy.sparse import isspmatrix
from sklearn.svm import SVC, NuSVC, SVR, NuSVR, OneClassSVM
from ..common._apply_operation import (
apply_cast, apply_concat, apply_abs,
apply_add, apply_mul, apply_div)
try:
from ..common._apply_operation import apply_less
except ImportError:
# onnxconverter-common is too old
apply_less = None
from ..common.data_types import (
BooleanTensorType, Int64TensorType, guess_proto_type)
from ..common._registration import register_converter
from ..proto import onnx_proto
def convert_sklearn_svm_regressor(
        scope, operator, container,
        op_type='SVMRegressor', op_domain='ai.onnx.ml', op_version=1):
    """
    Converter for model
    `SVR <https://scikit-learn.org/stable/modules/
    generated/sklearn.svm.SVR.html>`_,
    `NuSVR <https://scikit-learn.org/stable/modules/
    generated/sklearn.svm.NuSVR.html>`_,
    `OneClassSVM <https://scikit-learn.org/stable/
    modules/generated/sklearn.svm.OneClassSVM.html>`_.
    The converted model in ONNX produces the same results as the
    original model except when probability=False:
    *onnxruntime* and *scikit-learn* do not return the same raw
    scores. *scikit-learn* returns aggregated scores
    as a *matrix[N, C]* coming from `_ovr_decision_function
    <https://github.com/scikit-learn/scikit-learn/blob/master/
    sklearn/utils/multiclass.py#L402>`_. *onnxruntime* returns
    the raw score from *svm* algorithm as a *matrix[N, (C(C-1)/2]*.
    """
    svm_attrs = {'name': scope.get_unique_operator_name('SVM')}
    op = operator.raw_operator
    # Flatten dual coefficients and support vectors; sparse inputs are
    # densified below because the ONNX SVMRegressor attributes are dense.
    if isinstance(op.dual_coef_, np.ndarray):
        coef = op.dual_coef_.ravel()
    else:
        coef = op.dual_coef_
    intercept = op.intercept_
    if isinstance(op.support_vectors_, np.ndarray):
        support_vectors = op.support_vectors_.ravel()
    else:
        support_vectors = op.support_vectors_
    svm_attrs['kernel_type'] = op.kernel.upper()
    svm_attrs['kernel_params'] = [np.float32(_) for _ in
                                  [op._gamma, op.coef0, op.degree]]
    if isspmatrix(support_vectors):
        svm_attrs['support_vectors'] = support_vectors.toarray().ravel()
    else:
        svm_attrs['support_vectors'] = support_vectors
    if isspmatrix(coef):
        svm_attrs['coefficients'] = coef.toarray().ravel()
    else:
        svm_attrs['coefficients'] = coef
    # ONNX ai.onnx.ml attributes are stored as float32.
    svm_attrs['rho'] = intercept.astype(np.float32)
    svm_attrs['coefficients'] = svm_attrs['coefficients'].astype(np.float32)
    svm_attrs['support_vectors'] = svm_attrs['support_vectors'].astype(
        np.float32)
    # Computation dtype: double only when the input is double, else float32.
    proto_dtype = guess_proto_type(operator.inputs[0].type)
    if proto_dtype != onnx_proto.TensorProto.DOUBLE:
        proto_dtype = onnx_proto.TensorProto.FLOAT
    if operator.type in ['SklearnSVR', 'SklearnNuSVR'] or isinstance(
            op, (SVR, NuSVR)):
        svm_attrs['post_transform'] = 'NONE'
        svm_attrs['n_supports'] = len(op.support_)
        input_name = operator.input_full_names
        # Boolean/int64 inputs must be cast to the floating-point dtype
        # before reaching the SVMRegressor node.
        if type(operator.inputs[0].type) in (
                BooleanTensorType, Int64TensorType):
            cast_input_name = scope.get_unique_variable_name('cast_input')
            apply_cast(scope, operator.input_full_names, cast_input_name,
                       container, to=proto_dtype)
            input_name = cast_input_name
        svm_out = scope.get_unique_variable_name('SVM03')
        container.add_node(
            op_type, input_name, svm_out,
            op_domain=op_domain, op_version=op_version, **svm_attrs)
        apply_cast(scope, svm_out, operator.output_full_names,
                   container, to=proto_dtype)
    elif (operator.type in ['SklearnOneClassSVM'] or
          isinstance(op, OneClassSVM)):
        svm_attrs['post_transform'] = 'NONE'
        svm_attrs['n_supports'] = len(op.support_)
        input_name = operator.input_full_names
        if type(operator.inputs[0].type) in (
                BooleanTensorType, Int64TensorType):
            cast_input_name = scope.get_unique_variable_name('cast_input')
            apply_cast(scope, operator.input_full_names, cast_input_name,
                       container, to=proto_dtype)
            input_name = cast_input_name
        svm_out0 = scope.get_unique_variable_name('SVMO1')
        container.add_node(
            op_type, input_name, svm_out0,
            op_domain=op_domain, op_version=op_version, **svm_attrs)
        # Output 1: raw decision score; output 0: its sign as an int64 label.
        svm_out = operator.output_full_names[1]
        apply_cast(scope, svm_out0, svm_out, container, to=proto_dtype)
        pred = scope.get_unique_variable_name('float_prediction')
        container.add_node('Sign', svm_out, pred, op_version=9)
        apply_cast(scope, pred, operator.output_full_names[0],
                   container, to=onnx_proto.TensorProto.INT64)
    else:
        raise ValueError("Unknown support vector machine model type found "
                         "'{0}'.".format(operator.type))
def convert_sklearn_svm_classifier(
        scope, operator, container,
        op_type='SVMClassifier', op_domain='ai.onnx.ml', op_version=1):
    """
    Converter for model
    `SVC <https://scikit-learn.org/stable/modules/
    generated/sklearn.svm.SVC.html>`_,
    `NuSVC <https://scikit-learn.org/stable/modules/
    generated/sklearn.svm.NuSVC.html>`_.
    The converted model in ONNX produces the same results as the
    original model except when probability=False:
    *onnxruntime* and *scikit-learn* do not return the same raw
    scores. *scikit-learn* returns aggregated scores
    as a *matrix[N, C]* coming from `_ovr_decision_function
    <https://github.com/scikit-learn/scikit-learn/blob/master/
    sklearn/utils/multiclass.py#L402>`_. *onnxruntime* returns
    the raw score from *svm* algorithm as a *matrix[N, (C(C-1)/2]*.
    """
    # Computation dtype: double only when the input is double, else float32.
    proto_dtype = guess_proto_type(operator.inputs[0].type)
    if proto_dtype != onnx_proto.TensorProto.DOUBLE:
        proto_dtype = onnx_proto.TensorProto.FLOAT
    svm_attrs = {'name': scope.get_unique_operator_name('SVMc')}
    op = operator.raw_operator
    if isinstance(op.dual_coef_, np.ndarray):
        coef = op.dual_coef_.ravel()
    else:
        coef = op.dual_coef_
    intercept = op.intercept_
    if isinstance(op.support_vectors_, np.ndarray):
        support_vectors = op.support_vectors_.ravel()
    elif isspmatrix(op.support_vectors_):
        support_vectors = op.support_vectors_.toarray().ravel()
    else:
        support_vectors = op.support_vectors_
    svm_attrs['kernel_type'] = op.kernel.upper()
    # NOTE(review): built-in float here vs np.float32 in the regressor
    # converter -- presumably equivalent once serialized; confirm before
    # unifying.
    svm_attrs['kernel_params'] = [float(_) for _ in
                                  [op._gamma, op.coef0, op.degree]]
    svm_attrs['support_vectors'] = support_vectors
    if (operator.type in ['SklearnSVC', 'SklearnNuSVC'] or isinstance(
            op, (SVC, NuSVC))) and len(op.classes_) == 2:
        # Binary case: negate coefficients and intercept to match the sign
        # convention expected by the ONNX SVMClassifier for two classes.
        if isspmatrix(coef):
            coef_dense = coef.toarray().ravel()
            svm_attrs['coefficients'] = -coef_dense
        else:
            svm_attrs['coefficients'] = -coef
        svm_attrs['rho'] = -intercept
    else:
        if isspmatrix(coef):
            svm_attrs['coefficients'] = coef.todense()
        else:
            svm_attrs['coefficients'] = coef
        svm_attrs['rho'] = intercept
    handles_ovr = False
    # ONNX ai.onnx.ml attributes are stored as float32.
    svm_attrs['coefficients'] = svm_attrs['coefficients'].astype(np.float32)
    svm_attrs['support_vectors'] = svm_attrs['support_vectors'].astype(
        np.float32)
    svm_attrs['rho'] = svm_attrs['rho'].astype(np.float32)
    options = container.get_options(op, dict(raw_scores=False))
    use_raw_scores = options['raw_scores']
    if operator.type in ['SklearnSVC', 'SklearnNuSVC'] or isinstance(
            op, (SVC, NuSVC)):
        # probA_/probB_ are only populated when the model was fitted with
        # probability=True; otherwise the OVR aggregation below is emulated.
        if len(op.probA_) > 0:
            svm_attrs['prob_a'] = op.probA_.astype(np.float32)
        else:
            handles_ovr = True
        if len(op.probB_) > 0:
            svm_attrs['prob_b'] = op.probB_.astype(np.float32)
        # Choose where the raw SVM scores land: an intermediate tensor when
        # OVR post-processing or a raw-score negation step follows, else the
        # operator's second output directly.
        if (hasattr(op, 'decision_function_shape') and
                op.decision_function_shape == 'ovr' and handles_ovr and
                len(op.classes_) > 2):
            output_name = scope.get_unique_variable_name('before_ovr')
        elif len(op.classes_) == 2 and use_raw_scores:
            output_name = scope.get_unique_variable_name('raw_scores')
        else:
            output_name = operator.outputs[1].full_name
        svm_attrs['post_transform'] = 'NONE'
        svm_attrs['vectors_per_class'] = op.n_support_.tolist()
        label_name = operator.outputs[0].full_name
        probability_tensor_name = output_name
        # Class labels must be either all-numeric or all-string.
        if all(isinstance(i, (numbers.Real, bool, np.bool_))
                for i in op.classes_):
            labels = [int(i) for i in op.classes_]
            svm_attrs['classlabels_ints'] = labels
        elif all(isinstance(i, str) for i in op.classes_):
            labels = [str(i) for i in op.classes_]
            svm_attrs['classlabels_strings'] = labels
        else:
            raise RuntimeError("Invalid class label type '%s'." % op.classes_)
        svm_out = scope.get_unique_variable_name('SVM02')
        container.add_node(
            op_type, operator.inputs[0].full_name,
            [label_name, svm_out],
            op_domain=op_domain, op_version=op_version, **svm_attrs)
        apply_cast(scope, svm_out, probability_tensor_name,
                   container, to=proto_dtype)
        if len(op.classes_) == 2 and use_raw_scores:
            # Undo the binary-case negation applied above so the exposed raw
            # scores keep scikit-learn's sign.
            minus_one = scope.get_unique_variable_name('minus_one')
            container.add_initializer(minus_one, proto_dtype, [], [-1])
            container.add_node(
                'Mul', [output_name, minus_one], operator.outputs[1].full_name,
                name=scope.get_unique_operator_name('MulRawScores'))
    else:
        raise ValueError("Unknown support vector machine model type found "
                         "'{0}'.".format(operator.type))
    if (hasattr(op, 'decision_function_shape') and
            op.decision_function_shape == 'ovr' and handles_ovr and
            len(op.classes_) > 2):
        # Applies _ovr_decision_function.
        # See https://github.com/scikit-learn/scikit-learn/blob/
        # master/sklearn/utils/multiclass.py#L407:
        # ::
        #     _ovr_decision_function(dec < 0, -dec, len(self.classes_))
        #
        # ...
        # def _ovr_decision_function(predictions, confidences, n_classes):
        #
        #     n_samples = predictions.shape[0]
        #     votes = np.zeros((n_samples, n_classes))
        #     sum_of_confidences = np.zeros((n_samples, n_classes))
        #     k = 0
        #     for i in range(n_classes):
        #         for j in range(i + 1, n_classes):
        #             sum_of_confidences[:, i] -= confidences[:, k]
        #             sum_of_confidences[:, j] += confidences[:, k]
        #             votes[predictions[:, k] == 0, i] += 1
        #             votes[predictions[:, k] == 1, j] += 1
        #             k += 1
        #     transformed_confidences = (
        #         sum_of_confidences / (3 * (np.abs(sum_of_confidences) + 1)))
        #     return votes + transformed_confidences
        cst3 = scope.get_unique_variable_name('cst3')
        container.add_initializer(cst3, proto_dtype, [], [3])
        cst1 = scope.get_unique_variable_name('cst1')
        container.add_initializer(cst1, proto_dtype, [], [1])
        cst0 = scope.get_unique_variable_name('cst0')
        container.add_initializer(cst0, proto_dtype, [], [0])
        prediction = scope.get_unique_variable_name('prediction')
        if apply_less is None:
            raise RuntimeError(
                "Function apply_less is missing. "
                "onnxconverter-common is too old.")
        proto_dtype = guess_proto_type(operator.inputs[0].type)
        if proto_dtype != onnx_proto.TensorProto.DOUBLE:
            proto_dtype = onnx_proto.TensorProto.FLOAT
        # predictions = (dec < 0), as a float tensor for later arithmetic.
        apply_less(scope, [output_name, cst0], prediction, container)
        iprediction = scope.get_unique_variable_name('iprediction')
        apply_cast(scope, prediction, iprediction, container,
                   to=proto_dtype)
        n_classes = len(op.classes_)
        sumc_name = [scope.get_unique_variable_name('svcsumc_%d' % i)
                     for i in range(n_classes)]
        vote_name = [scope.get_unique_variable_name('svcvote_%d' % i)
                     for i in range(n_classes)]
        sumc_add = {n: [] for n in sumc_name}
        vote_add = {n: [] for n in vote_name}
        k = 0
        # For each class pair (i, j), column k of the raw scores contributes
        # to sum_of_confidences and votes of classes i and j.
        for i in range(n_classes):
            for j in range(i + 1, n_classes):
                name = scope.get_unique_operator_name(
                    'ArrayFeatureExtractor')
                ext = scope.get_unique_variable_name('Csvc_%d' % k)
                ind = scope.get_unique_variable_name('Cind_%d' % k)
                container.add_initializer(
                    ind, onnx_proto.TensorProto.INT64, [], [k])
                container.add_node(
                    'ArrayFeatureExtractor', [output_name, ind],
                    ext, op_domain='ai.onnx.ml', name=name)
                sumc_add[sumc_name[i]].append(ext)
                neg = scope.get_unique_variable_name('Cneg_%d' % k)
                name = scope.get_unique_operator_name('Neg')
                container.add_node(
                    'Neg', ext, neg, op_domain='', name=name,
                    op_version=6)
                sumc_add[sumc_name[j]].append(neg)
                # votes
                name = scope.get_unique_operator_name(
                    'ArrayFeatureExtractor')
                ext = scope.get_unique_variable_name('Vsvcv_%d' % k)
                container.add_node(
                    'ArrayFeatureExtractor', [iprediction, ind],
                    ext, op_domain='ai.onnx.ml', name=name)
                vote_add[vote_name[j]].append(ext)
                neg = scope.get_unique_variable_name('Vnegv_%d' % k)
                name = scope.get_unique_operator_name('Neg')
                container.add_node(
                    'Neg', ext, neg, op_domain='', name=name,
                    op_version=6)
                neg1 = scope.get_unique_variable_name('Vnegv1_%d' % k)
                apply_add(scope, [neg, cst1], neg1, container, broadcast=1,
                          operator_name='AddCl_%d_%d' % (i, j))
                vote_add[vote_name[i]].append(neg1)
                # next
                k += 1
        # Sum per-class contributions of confidences and votes.
        for k, v in sumc_add.items():
            name = scope.get_unique_operator_name('Sum')
            container.add_node(
                'Sum', v, k, op_domain='', name=name, op_version=8)
        for k, v in vote_add.items():
            name = scope.get_unique_operator_name('Sum')
            container.add_node(
                'Sum', v, k, op_domain='', name=name, op_version=8)
        conc = scope.get_unique_variable_name('Csvcconc')
        apply_concat(scope, sumc_name, conc, container, axis=1)
        conc_vote = scope.get_unique_variable_name('Vsvcconcv')
        apply_concat(scope, vote_name, conc_vote, container, axis=1)
        # transformed_confidences = conc / (3 * (|conc| + 1)).
        conc_abs = scope.get_unique_variable_name('Cabs')
        apply_abs(scope, conc, conc_abs, container)
        conc_abs1 = scope.get_unique_variable_name('Cconc_abs1')
        apply_add(scope, [conc_abs, cst1], conc_abs1, container, broadcast=1,
                  operator_name='AddF0')
        conc_abs3 = scope.get_unique_variable_name('Cconc_abs3')
        apply_mul(scope, [conc_abs1, cst3], conc_abs3, container, broadcast=1)
        final = scope.get_unique_variable_name('Csvcfinal')
        apply_div(
            scope, [conc, conc_abs3], final, container, broadcast=0)
        # Final score = votes + transformed_confidences.
        output_name = operator.outputs[1].full_name
        apply_add(
            scope, [conc_vote, final], output_name, container, broadcast=0,
            operator_name='AddF1')
# Register the converters; OneClassSVM and SVR share the regressor converter.
register_converter('SklearnOneClassSVM', convert_sklearn_svm_regressor)
register_converter('SklearnSVC', convert_sklearn_svm_classifier,
                   options={'zipmap': [True, False, 'columns'],
                            'nocl': [True, False],
                            'raw_scores': [True, False]})
register_converter('SklearnSVR', convert_sklearn_svm_regressor)
| 43.719251 | 79 | 0.622103 |
acebb82428fa85ac5ab2fafa1120ecf42a11b744 | 387 | py | Python | ptart/migrations/0011_label_deprecated.py | Fisjkars/sh00t | 3cae9d7f2b68c62ff58655d46f73c7a4ce2f6ec7 | [
"MIT"
] | 17 | 2020-04-28T15:42:44.000Z | 2022-03-21T07:45:07.000Z | ptart/migrations/0011_label_deprecated.py | Fisjkars/sh00t | 3cae9d7f2b68c62ff58655d46f73c7a4ce2f6ec7 | [
"MIT"
] | 34 | 2019-01-29T06:50:03.000Z | 2019-05-24T08:39:02.000Z | ptart/migrations/0011_label_deprecated.py | Fisjkars/sh00t | 3cae9d7f2b68c62ff58655d46f73c7a4ce2f6ec7 | [
"MIT"
] | 5 | 2020-09-22T20:02:37.000Z | 2022-03-15T12:53:00.000Z | # Generated by Django 2.2.24 on 2022-04-20 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ptart', '0010_project_archived'),
]
operations = [
migrations.AddField(
model_name='label',
name='deprecated',
field=models.BooleanField(default=False),
),
]
| 20.368421 | 53 | 0.599483 |
acebb8500686e56b30ce57d9529b0065b126a35c | 959 | py | Python | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class USqlDatabasePaged(Paged):
"""
A paging container for iterating over a list of :class:`USqlDatabase <azure.mgmt.datalake.analytics.catalog.models.USqlDatabase>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[USqlDatabase]'}
}
def __init__(self, *args, **kwargs):
super(USqlDatabasePaged, self).__init__(*args, **kwargs)
| 34.25 | 140 | 0.582899 |
acebb8542b60d508e35be0274f995b27b24ff4f4 | 3,490 | py | Python | tests/db/dump_tool.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 6 | 2021-03-09T10:24:02.000Z | 2022-01-16T03:52:11.000Z | tests/db/dump_tool.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 1,319 | 2020-12-18T08:52:29.000Z | 2022-03-31T18:17:32.000Z | tests/db/dump_tool.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 4 | 2021-03-03T15:36:50.000Z | 2022-03-11T11:41:51.000Z | """
Copyright 2021 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
Tool to populate the database and dump it for database update testing
"""
import asyncio
import os
import shutil
import pytest
from inmanta import const
from inmanta.protocol import methods
from inmanta.server import SLICE_SERVER
if __file__ and os.path.dirname(__file__).split("/")[-2] == "inmanta_tests":
from inmanta_tests.utils import _wait_until_deployment_finishes, wait_for_version # noqa: F401
else:
from utils import _wait_until_deployment_finishes, wait_for_version
def check_result(result):
    """Assert that an API call succeeded (HTTP 200).

    :param result: client response object exposing ``code`` (HTTP status)
        and, typically, ``result`` (the decoded body).
    :raises AssertionError: if the call did not return HTTP 200; the
        response body is included so the failure is diagnosable.
    """
    # Attach the body (when present) to the assertion message; a bare
    # ``assert result.code == 200`` only reports the status mismatch.
    assert result.code == 200, getattr(result, "result", result)
@pytest.mark.asyncio
async def test_dump_db(server, client, postgres_db, database_name):
    """Populate a fresh server with two environments / two exported versions,
    then ``pg_dump`` the resulting database to the file ``dbdump`` for use as
    a database-migration test fixture.

    Note: remove following line from the dump: SELECT pg_catalog.set_config('search_path', '', false);
    """
    if False:
        # trick autocomplete to have autocomplete on client
        client = methods
    # Create one project with two environments; only dev-1 gets a model.
    result = await client.create_project("project-test-a")
    assert result.code == 200
    project_id = result.result["project"]["id"]
    result = await client.create_environment(project_id=project_id, name="dev-1")
    assert result.code == 200
    env_id_1 = result.result["environment"]["id"]
    result = await client.create_environment(project_id=project_id, name="dev-2")
    assert result.code == 200
    # Copy the sample project into the server's storage dir for dev-1.
    project_dir = os.path.join(server.get_slice(SLICE_SERVER)._server_storage["environments"], str(env_id_1))
    project_source = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data", "simple_project")
    outname = "dbdump"
    print("Project at: ", project_dir)
    shutil.copytree(project_source, project_dir)
    # Make deploys immediate (no splay) so the test finishes quickly.
    check_result(await client.set_setting(env_id_1, "autostart_agent_deploy_splay_time", 0))
    check_result(await client.set_setting(env_id_1, "autostart_agent_deploy_interval", 0))
    check_result(await client.set_setting(env_id_1, "autostart_agent_repair_splay_time", 0))
    check_result(await client.set_setting(env_id_1, "autostart_agent_repair_interval", 600))
    # First compile/export cycle: trigger recompile, release v1, deploy it.
    await client.notify_change(id=env_id_1)
    versions = await wait_for_version(client, env_id_1, 1)
    v1 = versions["versions"][0]["version"]
    await client.release_version(env_id_1, v1, push=True, agent_trigger_method=const.AgentTriggerMethod.push_full_deploy)
    await _wait_until_deployment_finishes(client, env_id_1, v1, 20)
    # Second cycle so the dump contains multiple configuration versions.
    await client.notify_change(id=env_id_1)
    versions = await wait_for_version(client, env_id_1, 2)
    v2 = versions["versions"][0]["version"]
    await client.release_version(env_id_1, v2, push=True, agent_trigger_method=const.AgentTriggerMethod.push_full_deploy)
    await _wait_until_deployment_finishes(client, env_id_1, v2, 20)
    # Dump the populated database (-O: no ownership statements).
    proc = await asyncio.create_subprocess_exec(
        "pg_dump", "-h", "127.0.0.1", "-p", str(postgres_db.port), "-f", outname, "-O", "-U", postgres_db.user, database_name
    )
    await proc.wait()
| 35.979381 | 125 | 0.738682 |
acebb8a535acd52814070d8d83859ae753f21d0c | 1,282 | py | Python | h2o-py/tests/testdir_algos/deeplearning/pyunit_metrics_deeplearning.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | 1 | 2018-03-22T12:45:32.000Z | 2018-03-22T12:45:32.000Z | h2o-py/tests/testdir_algos/deeplearning/pyunit_metrics_deeplearning.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_algos/deeplearning/pyunit_metrics_deeplearning.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def deep_learning_metrics_test():
    """Train an H2O DeepLearning binomial classifier on the prostate data set
    and print its training/test metrics.
    """
    # connect to existing cluster
    df = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    # NOTE(review): H2OFrame.drop returns a new frame; this return value is
    # discarded, so 'ID' may still be present in df -- verify intent.
    df.drop("ID")  # remove ID
    df['CAPSULE'] = df['CAPSULE'].asfactor()  # make CAPSULE categorical
    vol = df['VOL']
    vol[vol == 0] = float("nan")  # 0 VOL means 'missing'
    # Random ~80/20 train/test split driven by a uniform column.
    r = vol.runif()  # random train/test split
    train = df[r < 0.8]
    test = df[r >= 0.8]
    # See that the data is ready
    train.describe()
    train.head()
    train.tail()
    test.describe()
    test.head()
    test.tail()
    # Run DeepLearning
    print("Train a Deeplearning model: ")
    dl = H2ODeepLearningEstimator(epochs=100, hidden=[10,10,10], loss="CrossEntropy")
    # Predictors are all columns from index 2 onward; response is CAPSULE.
    dl.train(x=list(range(2,train.ncol)),y="CAPSULE", training_frame=train)
    print("Binomial Model Metrics: ")
    print()
    dl.show()
    dl.model_performance(test).show()
if __name__ == "__main__":
    # Standalone invocation: let the test harness start/stop an H2O cluster.
    pyunit_utils.standalone_test(deep_learning_metrics_test)
else:
    # Imported by the suite runner: a cluster is already up, just run.
    deep_learning_metrics_test()
| 27.869565 | 83 | 0.670047 |
acebb9e3ad09ecf6fa2cc7fecef4288dbea934ad | 1,559 | py | Python | climlab/solar/orbital/__init__.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 160 | 2015-02-25T15:56:37.000Z | 2022-03-14T23:51:23.000Z | climlab/solar/orbital/__init__.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 137 | 2015-12-18T17:39:31.000Z | 2022-02-04T20:50:53.000Z | climlab/solar/orbital/__init__.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 54 | 2015-04-28T05:57:39.000Z | 2022-02-17T08:15:11.000Z | """
The object ``climlab.solar.orbital.OrbitalTable`` is an ``xarray.Dataset``
holding orbital data (**eccentricity**, **obliquity**, and **longitude of perihelion**)
for the past 5 Myears. The data are from :cite:`Berger_1991`.
Data are read from the file ``orbit91``, which was originally obtained from
<https://www1.ncdc.noaa.gov/pub/data/paleo/climate_forcing/orbital_variations/insolation/>
If the file isn't found locally, the module will attempt to read it remotely
from the above URL.
A subclass ``climlab.solar.orbital.long.OrbitalTable``
works with La2004 orbital data for
-51 to +21 Myears as calculated by :cite:`Laskar_2004`.
See <http://vo.imcce.fr/insola/earth/online/earth/La2004/README.TXT>
(Breaking change from climlab 0.7.0 and previous)
:Example:
Load orbital data from the past 5 Myears::
# Load the data
from climlab.solar.orbital import OrbitalTable
# Examine the xarray object
print(OrbitalTable)
# Get a timeseries of obliquity
print(OrbitalTable.obliquity)
# Get the orbital data for a specific year, 10 kyear before present:
print(OrbitalTable.interp(kyear=-10))
# Get the long orbital table data
from climlab.solar.orbital.long import OrbitalTable as LongTable
print(LongTable)
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import pandas as pd
import xarray as xr
from .table import _get_Berger_data
# Module-level singleton: xarray.Dataset of Berger & Loutre (1991) orbital
# parameters for the past 5 Myears, loaded once at import time from the
# ``orbit91`` table (local file, falling back to the NOAA URL -- see
# ``table._get_Berger_data``).
OrbitalTable = _get_Berger_data()
| 35.431818 | 90 | 0.709429 |
acebb9e408f29555930a5edf73cd1b1b7f29f09e | 6,128 | py | Python | kubernetes/client/models/v1beta2_scale_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/client/models/v1beta2_scale_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta2_scale_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 1 | 2020-12-10T07:28:08.000Z | 2020-12-10T07:28:08.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta2ScaleStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'replicas': 'int',
        'selector': 'dict(str, str)',
        'target_selector': 'str'
    }

    # Python attribute name -> JSON key in the Kubernetes API payload.
    attribute_map = {
        'replicas': 'replicas',
        'selector': 'selector',
        'target_selector': 'targetSelector'
    }

    def __init__(self, replicas=None, selector=None, target_selector=None):  # noqa: E501
        """V1beta2ScaleStatus - a model defined in OpenAPI"""  # noqa: E501
        self._replicas = None
        self._selector = None
        self._target_selector = None
        self.discriminator = None
        # 'replicas' is required by the API schema; its setter rejects None.
        self.replicas = replicas
        if selector is not None:
            self.selector = selector
        if target_selector is not None:
            self.target_selector = target_selector

    @property
    def replicas(self):
        """Gets the replicas of this V1beta2ScaleStatus.  # noqa: E501

        actual number of observed instances of the scaled object.  # noqa: E501

        :return: The replicas of this V1beta2ScaleStatus.  # noqa: E501
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Sets the replicas of this V1beta2ScaleStatus.

        actual number of observed instances of the scaled object.  # noqa: E501

        :param replicas: The replicas of this V1beta2ScaleStatus.  # noqa: E501
        :type: int
        """
        if replicas is None:
            raise ValueError("Invalid value for `replicas`, must not be `None`")  # noqa: E501
        self._replicas = replicas

    @property
    def selector(self):
        """Gets the selector of this V1beta2ScaleStatus.  # noqa: E501

        label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors  # noqa: E501

        :return: The selector of this V1beta2ScaleStatus.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Sets the selector of this V1beta2ScaleStatus.

        label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors  # noqa: E501

        :param selector: The selector of this V1beta2ScaleStatus.  # noqa: E501
        :type: dict(str, str)
        """
        self._selector = selector

    @property
    def target_selector(self):
        """Gets the target_selector of this V1beta2ScaleStatus.  # noqa: E501

        label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors  # noqa: E501

        :return: The target_selector of this V1beta2ScaleStatus.  # noqa: E501
        :rtype: str
        """
        return self._target_selector

    @target_selector.setter
    def target_selector(self, target_selector):
        """Sets the target_selector of this V1beta2ScaleStatus.

        label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors  # noqa: E501

        :param target_selector: The target_selector of this V1beta2ScaleStatus.  # noqa: E501
        :type: str
        """
        self._target_selector = target_selector

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively converts nested models, lists and dicts via their
        # own to_dict() where available.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta2ScaleStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 35.627907 | 505 | 0.63267 |
acebba3249e9f6c5997d45e3c5531cea34b44ca3 | 832 | py | Python | pepper/brain/utils/constants.py | cltl/pepper | 5d34fc5074473163aa9273016d89e5e2b8edffa9 | [
"MIT"
] | 29 | 2018-01-20T08:51:42.000Z | 2022-01-25T11:59:28.000Z | pepper/brain/utils/constants.py | cltl/pepper | 5d34fc5074473163aa9273016d89e5e2b8edffa9 | [
"MIT"
] | 32 | 2018-09-20T13:09:34.000Z | 2021-06-04T15:23:45.000Z | pepper/brain/utils/constants.py | cltl-leolani/pepper | 2592b867bdb44dd23a0fa58f9f96b9f6083d6804 | [
"MIT"
] | 10 | 2018-10-25T02:45:21.000Z | 2020-10-03T12:59:10.000Z | """
Other agreements/notes
Labels are connected by a -
Statements are hashed to be connected by a _
"""
NAMESPACE_MAPPING = {
'Instance': 'GAF',
'Assertion': 'GAF',
'Statement': 'GRASP',
'Experience': 'GRASP',
'Chat': 'GRASP',
'Visual': 'GRASP',
'Utterance': 'GRASP',
'Detection': 'GRASP',
'Mention': 'GAF',
'Attribution': 'GRASP',
'AttributionValue': 'GRASP',
'FactualityValue': 'GRASPf',
'CertaintyValue': 'GRASPf',
'TemporalValue': 'GRASPf',
'PolarityValue': 'GRASPf',
'SentimentValue': 'GRASPs',
'EmotionValue': 'GRASPe',
'Source': 'GRASP',
'Actor': 'SEM',
'Event': 'SEM',
'Place': 'SEM',
'Time': 'SEM',
'DateTimeDescription': 'TIME',
'Context': 'EPS'
}
CAPITALIZED_TYPES = ['person']
NOT_TO_MENTION_TYPES = ['instance']
| 22.486486 | 48 | 0.581731 |
acebba431d4e01b48157a90b7be1195d3e12e5b0 | 791 | py | Python | tools/timedelta.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 16 | 2021-07-14T23:32:31.000Z | 2022-03-24T16:25:15.000Z | tools/timedelta.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-20T20:39:47.000Z | 2021-09-16T20:57:59.000Z | tools/timedelta.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-15T19:38:35.000Z | 2022-01-31T19:24:56.000Z | #!/usr/bin/env python3.6
'''
Compute the delta between contig processing start/end times on the pcmp console log.
Outputs h:m:s contig base_count
'''
import fileinput
from datetime import datetime
times = dict()
# 2019-07-18 20:25:36,905
fmt = '%Y-%m-%d %H:%M:%S'
for line in fileinput.input():
try:
d, hmsm, _, sf, contig = line.split()
except ValueError:
continue
hms, ms = hmsm.split(',')
thetime = f'{d} {hms}'
if contig in times:
start = times[contig]
try:
chrom, rng = contig.rsplit(':', 1)
except ValueError:
continue
b, e = rng.split('-')
print(datetime.strptime(thetime, fmt) - datetime.strptime(start, fmt), contig, int(e) - int(b))
else:
times[contig] = thetime
| 23.264706 | 103 | 0.591656 |
acebbb3dabf9636d141a3c67d79ac511f9ff261a | 5,975 | py | Python | raksha/openstack/common/jsonutils.py | DPaaS-Raksha/raksha | e4e482865d2860473bc0a80e10d76bb127e9f6c5 | [
"Apache-2.0"
] | 8 | 2015-03-19T20:22:44.000Z | 2021-04-11T06:00:52.000Z | raksha/openstack/common/jsonutils.py | DPaaS-Raksha/raksha | e4e482865d2860473bc0a80e10d76bb127e9f6c5 | [
"Apache-2.0"
] | 1 | 2015-07-21T23:05:23.000Z | 2016-03-16T08:11:54.000Z | raksha/openstack/common/jsonutils.py | DPaaS-Raksha/raksha | e4e482865d2860473bc0a80e10d76bb127e9f6c5 | [
"Apache-2.0"
] | 5 | 2015-10-09T17:42:24.000Z | 2021-03-11T18:33:00.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib
import six
from raksha.openstack.common import timeutils
# Introspection predicates identifying objects that must never be recursed
# into by to_primitive (modules, classes, functions, generators, tracebacks,
# frames, ...); such values are stringified instead.
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]
# Types that are already JSON-safe and returned unchanged. Python 2 only:
# uses basestring/long and types.NoneType.
_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: arbitrary object to convert.
    :param convert_instances: when True, recurse into an instance's
        ``__dict__`` (lossy; class member vars are skipped).
    :param convert_datetime: when True, render datetimes via
        ``timeutils.strtime``; otherwise pass them through unchanged.
    :param level: current recursion depth (internal).
    :param max_depth: depth past which values collapse to ``'?'``.
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    # Depth cap is the cycle guard: collapse anything deeper to '?'.
    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        # Pre-bind the keyword arguments so each recursive call below
        # stays short; note dict/list recursion does NOT bump `level`.
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            # Opaque objects (modules, functions, frames, ...) are
            # stringified rather than traversed.
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
    """Serialize ``value`` to a JSON string, coercing non-serializable
    objects through :func:`to_primitive` by default.
    """
    return json.dumps(value, default=default, **kwargs)
def loads(s):
    """Deserialize a JSON document from the string ``s``."""
    decoded = json.loads(s)
    return decoded
def load(s):
    """Deserialize a JSON document read from the file-like object ``s``."""
    parsed = json.load(s)
    return parsed
try:
    import anyjson
except ImportError:
    pass
else:
    # If anyjson is available, register this module's dumps/loads/load as an
    # implementation and force anyjson to use it, so the to_primitive
    # fallback applies wherever anyjson is used.
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
| 35.147059 | 79 | 0.64954 |
acebbc4e77beab85140be2d2badad1f51e521e6b | 15,530 | py | Python | keystone/tests/protection/v3/test_service_providers.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 615 | 2015-01-07T12:32:52.000Z | 2022-03-24T03:49:47.000Z | keystone/tests/protection/v3/test_service_providers.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 11 | 2015-04-13T18:52:40.000Z | 2021-08-21T06:13:05.000Z | keystone/tests/protection/v3/test_service_providers.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 696 | 2015-01-15T00:31:07.000Z | 2022-03-16T09:56:00.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import http.client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _SystemUserServiceProviderTests(object):
    """Common default functionality for all system users."""
    # Mixin: the concrete test class must provide self.test_client() and
    # self.headers (an authenticated token) via its setUp.

    def test_user_can_list_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/OS-FEDERATION/service_providers', headers=self.headers
            )
            # Exactly the one provider created above should be listed.
            self.assertEqual(1, len(r.json['service_providers']))
            self.assertEqual(
                service_provider['id'], r.json['service_providers'][0]['id']
            )

    def test_user_can_get_a_service_provider(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers
            )
            self.assertEqual(
                service_provider['id'], r.json['service_provider']['id']
            )
class _SystemReaderAndMemberUserServiceProviderTests(object):
    """Common default functionality for system readers and system members."""
    # Mixin: expects self.test_client() and self.headers from the concrete
    # test class. All write operations must be forbidden for these personas.

    def test_user_cannot_create_service_providers(self):
        # NOTE(review): this first provider only seeds backend state; the
        # local ref is immediately overwritten with a fresh request body.
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        service_provider = unit.new_service_provider_ref()
        create = {'service_provider': service_provider}
        with self.test_client() as c:
            c.put(
                '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex,
                headers=self.headers,
                json=create,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_update_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        update = {'service_provider': {'enabled': False}}
        with self.test_client() as c:
            c.patch(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                json=update,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_delete_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            c.delete(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class _DomainAndProjectUserServiceProviderTests(object):
    """Common functionality for all domain and project users."""
    # Mixin: expects self.test_client() and self.headers from the concrete
    # test class. For these personas even reads must be forbidden.

    def test_user_cannot_create_service_providers(self):
        # NOTE(review): this first provider only seeds backend state; the
        # local ref is immediately overwritten with a fresh request body.
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        service_provider = unit.new_service_provider_ref()
        create = {'service_provider': service_provider}
        with self.test_client() as c:
            c.put(
                '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex,
                headers=self.headers,
                json=create,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_update_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        update = {'service_provider': {'enabled': False}}
        with self.test_client() as c:
            c.patch(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                json=update,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_list_service_providers(self):
        PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            c.get(
                '/v3/OS-FEDERATION/service_providers', headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_get_a_service_provider(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            c.get(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_user_cannot_delete_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            c.delete(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserServiceProviderTests,
                        _SystemReaderAndMemberUserServiceProviderTests):
    """System user with the reader role: may read, must not write."""

    def setUp(self):
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a default-domain user and grant it the reader role on the
        # system scope.
        system_reader = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_reader
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_reader['password'],
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserServiceProviderTests,
                        _SystemReaderAndMemberUserServiceProviderTests):
    """System user with the member role: may read, must not write."""

    def setUp(self):
        super(SystemMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a default-domain user and grant it the member role on the
        # system scope.
        system_member = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_member
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.member_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_member['password'],
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _SystemUserServiceProviderTests):
    """System admin: full read/write access to service providers."""

    def setUp(self):
        super(SystemAdminTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Reuse the system administrator account created during
        # ``keystone-manage bootstrap``
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def test_user_can_create_service_providers(self):
        # NOTE(review): this first provider only seeds backend state; the
        # local ref is immediately overwritten with a fresh request body.
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        service_provider = unit.new_service_provider_ref()
        create = {'service_provider': service_provider}
        with self.test_client() as c:
            c.put(
                '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex,
                headers=self.headers,
                json=create,
                expected_status_code=http.client.CREATED
            )

    def test_user_can_update_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        update = {'service_provider': {'enabled': False}}
        with self.test_client() as c:
            c.patch(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers,
                json=update
            )

    def test_user_can_delete_service_providers(self):
        service_provider = PROVIDERS.federation_api.create_sp(
            uuid.uuid4().hex, unit.new_service_provider_ref()
        )
        with self.test_client() as c:
            c.delete(
                '/v3/OS-FEDERATION/service_providers/%s' %
                service_provider['id'],
                headers=self.headers
            )
class DomainUserTests(base_classes.TestCaseWithBootstrap,
                      common_auth.AuthTestMixin,
                      _DomainAndProjectUserServiceProviderTests):
    """Domain-scoped admin: no access to service providers at all."""

    def setUp(self):
        super(DomainUserTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Fresh domain with a user holding the admin role on that domain
        # (admin on a domain is still not system scope).
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_admin = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.admin_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=domain_admin['password'],
            domain_id=self.domain_id
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _DomainAndProjectUserServiceProviderTests):
    """Run the shared service-provider tests as a project-scoped admin user."""
    def setUp(self):
        super(ProjectUserTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Enforce scope so project users are subject to the scoped policies.
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        # Reuse the bootstrapped admin but scope the token to a project.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            project_id=self.bootstrapper.project_id
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTestsWithoutEnforceScope(
        base_classes.TestCaseWithBootstrap,
        common_auth.AuthTestMixin,
        _DomainAndProjectUserServiceProviderTests):
    """Same persona as ProjectUserTests but with scope enforcement disabled."""
    def setUp(self):
        super(ProjectUserTestsWithoutEnforceScope, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Explicitly set enforce_scope to False to make sure we maintain
        # backwards compatibility with project users.
        self.config_fixture.config(group='oslo_policy', enforce_scope=False)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        # A plain member user on a fresh project (not an admin persona).
        user = unit.new_user_ref(domain_id=domain['id'])
        self.user_id = PROVIDERS.identity_api.create_user(user)['id']
        self.project_id = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id'])
        )['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            project_id=self.project_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=user['password'],
            project_id=self.project_id
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
| 36.800948 | 78 | 0.627366 |
acebbcb170ff4a86f7309671bea015149f643a94 | 4,914 | py | Python | polynom/domain/domain.py | oznurkalkar/polynom | 13c85c5570995350000bf93fbcf4b8abe3c686e8 | [
"Apache-2.0"
] | null | null | null | polynom/domain/domain.py | oznurkalkar/polynom | 13c85c5570995350000bf93fbcf4b8abe3c686e8 | [
"Apache-2.0"
] | null | null | null | polynom/domain/domain.py | oznurkalkar/polynom | 13c85c5570995350000bf93fbcf4b8abe3c686e8 | [
"Apache-2.0"
] | null | null | null | from polynom.domain.fft import perform_fft
from typing import Union
from polynom.ecc import Scalar, one, zero
from polynom.utils import log2, pad
from polynom.polynomial import Polynomial
class DomainConfig:
def __init__(self, root_of_unity: Scalar, s: int, exp: int, k2: Scalar, k: int = 1):
self.root_of_unity = root_of_unity
self.k = k
self.s = s
self.exp = exp
self.k2 = k2
self.inv_k2 = one / k2
self.n = 1 << exp
self.inv_n = one / self.n
def calculate_domain(w: Scalar, exp: int, k: int = 1) -> list[Scalar]:
n = 1 << exp
W = [Scalar(k)] + (n - 1) * [Scalar(0)]
for i in range(1, n):
W[i] = W[i - 1] * w
return W
def inverse_domain(domain: list[Scalar]) -> list[Scalar]:
inverse_domain = []
for w in domain:
inverse_domain.append(one / w)
return inverse_domain
class Domain:
def __init__(self, config: DomainConfig):
w = config.root_of_unity
self.exp = config.exp
self.s = config.s
self.k = config.k
self.k2 = config.k2
self.inv_k2 = config.inv_k2
self.n = config.n
self.inv_n = config.inv_n
for _ in range(self.exp, self.s):
w = w**2
# self.w = w
self.domain = calculate_domain(w, self.exp, self.k)
self.inverse_domain = inverse_domain(self.domain)
def extend(self, poly: Polynomial):
assert poly.n() <= self.n
k = log2(poly.n())
u = 1 << self.exp - k
coeffs: list[Scalar] = []
for a in poly.coeffs:
coeffs += [a] + [Scalar(0)] * (u - 1)
return Polynomial(coeffs)
def coset(self, k):
return [k * w for w in self.domain]
def lagrange_polynomial(self, i: int) -> Polynomial:
assert i < self.n
coeffs = [Scalar(0)] * self.n
coeffs[i] = one
return self.interpolate(Polynomial(coeffs))
def lagrange_evaluation_range(self, i: int, j: int, zeta: Scalar) -> Scalar:
pass
def lagrange_evaluation(self, i: int, zeta: Scalar) -> Scalar:
assert i < self.n
n = self.n
zeta_n, w = zeta**n, self.domain[i]
u = (zeta_n - one) * w
v = (zeta - w) * n
return u / v
def new_poly(self, coeffs) -> Polynomial:
assert len(coeffs) == self.n
return Polynomial(coeffs)
def i(self, *input: Union[Polynomial, list[Scalar]]) -> list[Polynomial]:
return [self.interpolate(poly) for poly in input]
def interpolate(self, poly: Union[Polynomial, list[Scalar]]) -> Polynomial:
coeffs = None
if isinstance(poly, Polynomial):
assert len(poly) <= self.n
coeffs = pad(poly.coeffs, self.n)
coeffs = [c * self.inv_n for c in perform_fft(coeffs, self.inverse_domain)]
return Polynomial(coeffs)
assert isinstance(poly, list)
coeffs = pad(poly, self.n)
coeffs = [c * self.inv_n for c in perform_fft(coeffs, self.inverse_domain)]
return Polynomial(coeffs)
def evaluate(self, poly) -> Polynomial:
coeffs = pad(poly.coeffs, self.n)
coeffs = perform_fft(coeffs, self.domain)
return Polynomial(coeffs)
def w(self) -> Scalar:
return self.domain[1]
def omega(self, poly: Polynomial) -> Polynomial:
return poly.distribute(self.w())
def vanishing(self) -> Polynomial:
return Polynomial([-one] + [zero] * (self.n - 1) + [one])
def mul(self, *v: Polynomial) -> Polynomial:
assert len(v) > 1
for u in v:
assert u.n() <= self.n
if u.is_zero():
return Polynomial([zero] * self.n)
acc = perform_fft(pad(v[0].coeffs, self.n), self.domain)
for i in range(1, len(v)):
v_i_evals = perform_fft(pad(v[i].coeffs, self.n), self.domain)
acc = [u * v for u, v in zip(acc, v_i_evals)]
coeffs = [c * self.inv_n for c in perform_fft(acc, self.inverse_domain)]
return self.new_poly(coeffs)
def div(self, a: Polynomial, b: Polynomial) -> Polynomial:
if a.is_zero() or b.is_zero():
return Polynomial([zero] * self.n)
assert a.n() <= self.n
assert b.n() <= self.n
a_evals = perform_fft(pad(a.coeffs, self.n), self.domain)
b_evals = perform_fft(pad(b.coeffs, self.n), self.domain)
b_evals = [1 / u for u in b_evals]
mul_evals = [u * v for u, v in zip(a_evals, b_evals)]
coeffs = [c * self.inv_n for c in perform_fft(mul_evals, self.inverse_domain)]
return self.new_poly(coeffs)
def coset_div(self, a: Polynomial, b: Polynomial) -> Polynomial:
k2 = self.k2
inv_k2 = self.inv_k2
if a.is_zero():
return a.clone()
u = self.div(a.distribute(k2), b.distribute(k2))
return u.distribute(inv_k2)
| 30.521739 | 88 | 0.57733 |
acebbd2500a46e8299256857e50663900a00cb9c | 1,992 | py | Python | mmdet-v2/configs/tile/_base_/datasets/tile_cut_800x800.py | li-phone/DetectionCompetition | a917f16790ec30358e3cfe1aa6e327a2070a1235 | [
"Apache-2.0"
] | null | null | null | mmdet-v2/configs/tile/_base_/datasets/tile_cut_800x800.py | li-phone/DetectionCompetition | a917f16790ec30358e3cfe1aa6e327a2070a1235 | [
"Apache-2.0"
] | null | null | null | mmdet-v2/configs/tile/_base_/datasets/tile_cut_800x800.py | li-phone/DetectionCompetition | a917f16790ec30358e3cfe1aa6e327a2070a1235 | [
"Apache-2.0"
] | null | null | null | dataset_type = 'TileDataset'
data_root = 'data/tile/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(4000, 3000),
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/cut_800x800/cut_800x800_train.json',
img_prefix=data_root + 'trainval/cut_800x800',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/cut_800x800/cut_800x800_val.json',
img_prefix=data_root + 'trainval/cut_800x800',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/backup/instance_XS_train.json',
img_prefix=data_root + 'raw/tile_round1_train_20201231/train_imgs',
pipeline=test_pipeline),
# test=dict(
# type=dataset_type,
# ann_file=data_root + 'annotations/cut_800x800/cut_800x800_val.json',
# img_prefix=data_root + 'trainval/cut_800x800',
# pipeline=test_pipeline),
)
evaluation = dict(interval=1, metric='bbox')
| 36.218182 | 78 | 0.641064 |
acebbd428269d258f99e22966c204a1fab9bfbd4 | 122 | py | Python | climateradials/streamlit.debug.py | kastnerp/ClimateRadials | 14a7902a2bb6dd4a985c980a1bbb6c386c0537b7 | [
"MIT"
] | 2 | 2020-12-16T12:00:02.000Z | 2021-09-21T08:14:31.000Z | climateradials/streamlit.debug.py | kastnerp/ClimateRadials | 14a7902a2bb6dd4a985c980a1bbb6c386c0537b7 | [
"MIT"
] | null | null | null | climateradials/streamlit.debug.py | kastnerp/ClimateRadials | 14a7902a2bb6dd4a985c980a1bbb6c386c0537b7 | [
"MIT"
] | null | null | null | from streamlit import bootstrap
real_script = 'streamlit_app.py'
bootstrap.run(real_script, f'run.py {real_script}', []) | 24.4 | 55 | 0.770492 |
acebbdd254599f02e8af3119a505496fb3c1769a | 1,016 | py | Python | tests/test_post.py | sharonkorir/my-two-cents | 5611bde8b76d7367a893b17f56c861c5632bff96 | [
"MIT"
] | null | null | null | tests/test_post.py | sharonkorir/my-two-cents | 5611bde8b76d7367a893b17f56c861c5632bff96 | [
"MIT"
] | null | null | null | tests/test_post.py | sharonkorir/my-two-cents | 5611bde8b76d7367a893b17f56c861c5632bff96 | [
"MIT"
] | null | null | null |
import unittest
from app.models import Post, User
class PostModelTest(unittest.TestCase):
    """Unit tests for the Post model and its link to User."""
    def setUp(self):
        # Fresh, unsaved fixture objects before every test.
        self.new_user = User(username = 'Sharon',password = 'potato', email = 'sharon@em.com')
        self.new_post = Post(content = 'random post content to test', title = 'random test post', user = self.new_user)
    def test_instance(self):
        # Fixtures are instances of the expected model classes.
        self.assertTrue(isinstance(self.new_post,Post))
        self.assertTrue(isinstance(self.new_user,User))
    def tearDown(self):
        # Wipe both tables so tests remain independent.
        Post.query.delete()
        User.query.delete()
    def test_save_post(self):
        self.new_post.save_post()
        self.assertTrue(len(Post.query.all())>0)
    def test_get_posts_by_id(self):
        self.new_post.save_post()
        # NOTE(review): get_posts is called positionally here but with a
        # keyword (id=...) in test_delete_post -- confirm its signature.
        got_post = Post.get_posts(1)
        self.assertTrue(len(got_post) == 1)
    def test_delete_post(self):
        # NOTE(review): the post is never saved before deletion, so the final
        # assert passes against an already-empty table -- a save_post() call
        # is presumably missing; verify intent.
        got_post = Post.get_posts(id = self.new_post.id)
        self.new_post.delete_posts(got_post)
        self.assertTrue(len(Post.query.all())==0)
| 30.787879 | 119 | 0.652559 |
acebbe0e5f74d65aaa554aa7084881cd801d9f85 | 828 | py | Python | twilio django/twilio_video_call/ajax_conn/migrations/0001_initial.py | kabirivan/Remote_Assistant | 4560f721701defa23358120588fe5822a97a1054 | [
"MIT"
] | null | null | null | twilio django/twilio_video_call/ajax_conn/migrations/0001_initial.py | kabirivan/Remote_Assistant | 4560f721701defa23358120588fe5822a97a1054 | [
"MIT"
] | null | null | null | twilio django/twilio_video_call/ajax_conn/migrations/0001_initial.py | kabirivan/Remote_Assistant | 4560f721701defa23358120588fe5822a97a1054 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-16 17:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Friend table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nick_name', models.CharField(max_length=100, unique=True)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('likes', models.CharField(max_length=250)),
                ('dob', models.DateField()),
                ('lives_in', models.CharField(blank=True, max_length=150, null=True)),
            ],
        ),
    ]
| 30.666667 | 114 | 0.571256 |
acebbf392e9e36772998e40a8b4207f9af5f6c6b | 623 | py | Python | sinfo/componentes/migrations/0002_alter_cpu_options_alter_memoriaram_options_and_more.py | webdesigncuba/Sinfo | 15998b43057b0c0f13083a3017f27740c64239bf | [
"MIT"
] | null | null | null | sinfo/componentes/migrations/0002_alter_cpu_options_alter_memoriaram_options_and_more.py | webdesigncuba/Sinfo | 15998b43057b0c0f13083a3017f27740c64239bf | [
"MIT"
] | null | null | null | sinfo/componentes/migrations/0002_alter_cpu_options_alter_memoriaram_options_and_more.py | webdesigncuba/Sinfo | 15998b43057b0c0f13083a3017f27740c64239bf | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-20 12:06
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only changes the admin plural display names
    # of three existing models (no schema change).
    dependencies = [
        ('componentes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='cpu',
            options={'verbose_name_plural': 'CPU'},
        ),
        migrations.AlterModelOptions(
            name='memoriaram',
            options={'verbose_name_plural': 'Memorias Ram'},
        ),
        migrations.AlterModelOptions(
            name='placabase',
            options={'verbose_name_plural': 'Mother Boards'},
        ),
    ]
| 23.961538 | 61 | 0.574639 |
acebc0009e06a990425caa2704ec557a4dfb4f5f | 681 | py | Python | .venv/bin/django-admin.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | null | null | null | .venv/bin/django-admin.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | null | null | null | .venv/bin/django-admin.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | null | null | null | #!/workspaces/label-studio/.venv/bin/python3.8
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 30.954545 | 80 | 0.723935 |
acebc0232a67b616ab1a099f485389b09b8daf0e | 303 | py | Python | pyzeta/instances/eitherfunctor.py | victoradan/pythonZeta | 6aac3087a9ad8b17c2006126f1f39a519dbc8463 | [
"MIT"
] | null | null | null | pyzeta/instances/eitherfunctor.py | victoradan/pythonZeta | 6aac3087a9ad8b17c2006126f1f39a519dbc8463 | [
"MIT"
] | null | null | null | pyzeta/instances/eitherfunctor.py | victoradan/pythonZeta | 6aac3087a9ad8b17c2006126f1f39a519dbc8463 | [
"MIT"
] | null | null | null | from pyzeta.typeclasses.functor import Functor
from pyzeta.registry import register
from pyzeta.datatypes.either import Either, Right
class EitherFunctor(Functor):
@staticmethod
def fmap(f, fa):
return Right(f(fa.right)) if fa.right else fa
register('Functor', Either, EitherFunctor)
| 25.25 | 53 | 0.759076 |
acebc076ae994aa07366a021cab9937678e370f0 | 520 | py | Python | apcommand/documentation/user/broadcom_command_line.py | russellnakamura/apcommand | 84a8ac522967477e10e51d3583f83c3b7de1ac2b | [
"MIT"
] | null | null | null | apcommand/documentation/user/broadcom_command_line.py | russellnakamura/apcommand | 84a8ac522967477e10e51d3583f83c3b7de1ac2b | [
"MIT"
] | null | null | null | apcommand/documentation/user/broadcom_command_line.py | russellnakamura/apcommand | 84a8ac522967477e10e51d3583f83c3b7de1ac2b | [
"MIT"
] | null | null | null |
# python standard library
import subprocess
# this package
from apcommand.broadcom.argumentparser import Arguments
arguments = Arguments()
arguments.add_arguments()
arguments.add_subparsers()
parser = arguments.parser
# this is so that it doesn't say 'Pweave' as the name
parser.prog = 'broadcom'
parser.print_help()
print subprocess.check_output('broadcom status --help'.split())
print subprocess.check_output('broadcom channel --help'.split())
print subprocess.check_output('broadcom enable --help'.split())
| 20.8 | 64 | 0.778846 |
acebc27dcd8def4fadf2c5923f51cc00856533ff | 508 | py | Python | modules/python-codes/modules/conditionals/src/elif.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | 1 | 2020-09-06T22:17:19.000Z | 2020-09-06T22:17:19.000Z | modules/python-codes/modules/conditionals/src/elif.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | modules/python-codes/modules/conditionals/src/elif.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | ########################################################
# Rodrigo Leite - drigols #
# Last update: 01/11/2021 #
########################################################
goal = 20000
sales = float(input("Enter sales amount: "))
if sales < goal:
print("No bonus.")
elif sales > (goal*2):
bonus = (0.07 * sales) # 7% bonus.
print("Bonus 7%: {0}".format(bonus))
else:
bonus = (0.03 * sales) # 3% bonus.
print("Bonus 3%: {0}".format(bonus))
| 28.222222 | 56 | 0.397638 |
acebc3454ed7e3c20e8fd1c2f05864691fb5706e | 4,401 | py | Python | medfw.py | cjymz886/find-new-Chinese-medical-words | fb68a49a032ecf8a1e345e768a93352db4ab1978 | [
"MIT"
] | 72 | 2018-06-09T09:43:05.000Z | 2022-03-10T06:57:13.000Z | medfw.py | cjymz886/find-new-Chinese-medical-words | fb68a49a032ecf8a1e345e768a93352db4ab1978 | [
"MIT"
] | 2 | 2019-03-21T07:41:22.000Z | 2021-06-07T00:52:02.000Z | medfw.py | cjymz886/find-new-Chinese-medical-words | fb68a49a032ecf8a1e345e768a93352db4ab1978 | [
"MIT"
] | 20 | 2018-07-11T07:29:47.000Z | 2020-12-28T02:14:51.000Z | from __future__ import absolute_import
__version__ = '1.0'
__license__ = 'MIT'
import os
import logging
import time
import codecs
import sys
from module.corpus_count import *
from module.corpus_segment import *
from module.select_model import *
from module.words_search import *
medfw_path=os.getcwd()
file_corpus=medfw_path+'/data/file_corpus.txt'
file_dict=medfw_path+'/data/dict.txt'
file_count_one=medfw_path+'/data/count_one.txt'
file_count_two=medfw_path+'/data/count_two.txt'
file_segment=medfw_path+'/data/file_segment.txt'
log_console = logging.StreamHandler(sys.stderr)
default_logger = logging.getLogger(__name__)
default_logger.setLevel(logging.DEBUG)
default_logger.addHandler(log_console)
def setLogLevel(log_level):
    """Set the verbosity of this module's ``default_logger``.

    The previous version declared ``global logger`` although no module
    global of that name exists; the declaration was dead and misleading,
    so it has been removed -- the level is applied to ``default_logger``.
    """
    default_logger.setLevel(log_level)
class MedFW(object):
    """Pipeline that discovers new (Chinese medical) words from a raw corpus.

    Steps:
      1. count unigram/bigram statistics over the corpus
      2. select stable words by PMI into an initial vocabulary
      3. segment the corpus with the current vocabulary
      4. validate the most frequent segments against a search engine
      5. repeat steps 3-4 until at most Y candidate segments remain
    """

    def __init__(self, K=10.8, H=2000, R=60, Y=5000):
        self.K = K  # PMI threshold for selecting stable words (step 2)
        self.H = H  # top-H segment words submitted to the search engine (step 4)
        self.R = R  # minimum search-engine frequency to enter the dictionary (step 4)
        self.Y = Y  # candidate count at/below which the iteration ends (step 5)
        self.seg_num = 0     # number of candidates from the last segmentation
        self.search_num = 0  # words added to file_dict by the search engine

    def medfw_s1(self):
        """Step 1: count unigrams (order 1) and bigrams (order 2)."""
        for order, file_count in ((1, file_count_one), (2, file_count_two)):
            default_logger.debug("Counting corpus to get %s...\n" % (file_count,))
            t1 = time.time()
            cc = Finding(file_corpus, file_count, order)
            cc.find_word()
            default_logger.debug("Getting %s cost %.3f seconds...\n" % (file_count, time.time() - t1))

    def medfw_s2(self):
        """Step 2: select stable words (PMI >= K) into the initial vocabulary."""
        default_logger.debug("Select stable words and generate initial vocabulary... \n")
        select(file_count_one, file_count_two, file_dict, self.K)

    def medfw_s3(self):
        """Step 3: segment the corpus with the current vocabulary."""
        t1 = time.time()
        sc = Cuting(file_corpus, file_dict, file_segment)
        self.seg_num = sc.find()
        default_logger.debug("Segment corpus cost %.3f seconds...\n" % (time.time() - t1))

    def medfw_s4(self, H, R, iternum):
        """Step 4: keep the top-H segment words whose search frequency >= R."""
        t1 = time.time()
        self.search_num = search(file_segment, file_dict, H, R, iternum)
        default_logger.debug("Select words cost %.3f seconds...\n" % (time.time() - t1))

    def medfw(self):
        """Run the full discovery pipeline (steps 1-5)."""
        print('-----------------------------------')
        print('step1:count corpus')
        self.medfw_s1()
        print('-----------------------------------')
        print('step2:select stable words and generate initial vocabulary')
        self.medfw_s2()
        print('-----------------------------------')
        print('step3:use initial vocabulary to segment corpus')
        self.medfw_s3()
        print('-----------------------------------')
        print('step4:use search engine to select words of segment corpus')
        # BUG FIX: the frequency threshold must be self.R; the original
        # passed R=self.H (2000) instead of the documented default of 60.
        self.medfw_s4(H=self.H, R=self.R, iternum=0)
        print('-----------------------------------')
        print('step5:cycling iteration')
        iter_num = 1
        # Keep iterating as long as the previous round added new words.
        while self.search_num:
            default_logger.debug("Itering %d...\n" % (iter_num,))
            t1 = time.time()
            self.medfw_s3()
            if self.seg_num <= self.Y:
                # Few enough candidates left: validate them all and stop.
                self.H = self.seg_num
                self.medfw_s4(H=self.H, R=self.R, iternum=iter_num)
                default_logger.debug("Ending the iteration ...\n")
                break
            self.medfw_s4(H=self.H, R=self.R, iternum=iter_num)
            iter_num += 1
            default_logger.debug("Itering %d cost %.3f seconds...\n " % ((iter_num - 1), time.time() - t1))
        with codecs.open(file_dict, 'r', encoding='utf-8') as f:
            total_num = len(f.readlines())
        print('Having successfully found %d words from corpus ' % total_num)
if __name__ == '__main__':
    # Run the full discovery pipeline with the default hyper-parameters.
    md=MedFW(K=10.8,H=2000,R=60,Y=5000)
    md.medfw()
| 34.928571 | 109 | 0.607135 |
acebc4a0ed61477ac234c1ec95be80dd42e33686 | 2,587 | py | Python | examples/02_record_decode.py | wangyu09/exkaldi-rt | 16b0b0642dbd6134afffa5b613a70630fddd8940 | [
"Apache-2.0"
] | 41 | 2021-04-06T11:16:28.000Z | 2022-03-05T19:37:29.000Z | examples/02_record_decode.py | wangyu09/exkaldi-rt | 16b0b0642dbd6134afffa5b613a70630fddd8940 | [
"Apache-2.0"
] | 5 | 2021-04-22T15:14:26.000Z | 2021-07-22T12:25:45.000Z | examples/02_record_decode.py | wangyu09/exkaldi-rt | 16b0b0642dbd6134afffa5b613a70630fddd8940 | [
"Apache-2.0"
] | 8 | 2021-04-07T07:22:57.000Z | 2021-12-07T03:41:13.000Z | from exkaldirt import base,stream,feature,decode
from exkaldirt.base import info
from neural_networks import make_DNN_acoustic_model
import time
##########################
# Hyperparameters
##########################
kerasModel = "model.h5"
words = "words.txt"
hmm = "final.mdl"
HCLG = "HCLG.fst"
delta = 2
spliceLeft = 10
spliceRight = 10
featDim = (13*(delta+1)) * (spliceLeft+1+spliceRight)
##########################
# Load DNN acoustic model
##########################
pdfDim = decode.get_pdf_dim(hmm)
kerasmodel = make_DNN_acoustic_model(featDim,pdfDim)
kerasmodel.load_weights(kerasModel)
##########################
# Define components
##########################
# 1. Create a stream recorder to read realtime stream from microphone
recorder = stream.StreamRecorder()
# 2. Cutter to cut frame
cutter = stream.ElementFrameCutter(batchSize=50,width=400,shift=160)
# 3. MFCC feature extracting
extractor = feature.MfccExtractor(
useEnergy=False,
)
# 4. processing feature
processor = feature.MatrixFeatureProcessor(
delta=delta,
spliceLeft=spliceLeft,
spliceRight=spliceRight,
cmvNormalizer=feature.FrameSlideCMVNormalizer(),
)
# 5. acoustic probability computer
def keras_compute(feats):
return kerasmodel(feats,training=False).numpy()
estimator = decode.AcousticEstimator(
keras_compute,
applySoftmax=True,
applyLog=True,
)
# 6. online decoder
decoder = decode.WfstDecoder(
symbolTable=words,
silencePhones="1:2:3:4:5:6:7:8:9:10",
frameShiftSec=160/16000,
tmodel=hmm,
graph=HCLG,
beam=10,
latticeBeam=8,
minActive=200,
maxActive=7000,
acousticScale=0.1
)
##########################
# Link components
##########################
chain = base.Chain()
chain.add(recorder)
chain.add(cutter)
chain.add(extractor)
chain.add(processor)
chain.add(estimator)
chain.add(decoder)
##########################
# Run and display the results
##########################
chain.start()
base.dynamic_display(chain.outPIPE, mapFunc=lambda packet:print("Result >> ",packet[packet.mainKey])) | 29.067416 | 101 | 0.521453 |
acebc51bfd8bf57c1d58f1dae61715461626cd3d | 11,100 | py | Python | labml/experiment.py | conanjm/labml | 6a17bab4527e15897b570d64d2346d9aef8f88ff | [
"MIT"
] | 1 | 2021-01-08T20:32:07.000Z | 2021-01-08T20:32:07.000Z | labml/experiment.py | conanjm/labml | 6a17bab4527e15897b570d64d2346d9aef8f88ff | [
"MIT"
] | null | null | null | labml/experiment.py | conanjm/labml | 6a17bab4527e15897b570d64d2346d9aef8f88ff | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional, Set, Dict, List, Union, TYPE_CHECKING, overload
import numpy as np
from labml.configs import BaseConfigs
from labml.internal.experiment import \
create_experiment as _create_experiment, \
experiment_singleton as _experiment_singleton, \
ModelSaver
from labml.internal.monitor import monitor_singleton as monitor
from labml.internal.experiment.experiment_run import \
get_configs as _get_configs
if TYPE_CHECKING:
import torch
def generate_uuid() -> str:
    """Return a fresh 32-character hex string derived from a UUID1."""
    import uuid as _uuid
    return _uuid.uuid1().hex
def save_checkpoint():
    r"""Persist a checkpoint of the current experiment's models."""
    experiment = _experiment_singleton()
    experiment.save_checkpoint()
def get_uuid():
    r"""Return the UUID of the current experiment run."""
    experiment = _experiment_singleton()
    return experiment.run.uuid
def create(*,
           uuid: Optional[str] = None,
           name: Optional[str] = None,
           python_file: Optional[str] = None,
           comment: Optional[str] = None,
           writers: Set[str] = None,
           ignore_callers: Set[str] = None,
           tags: Optional[Set[str]] = None,
           disable_screen: bool = False):
    r"""
    Create an experiment.

    Keyword Arguments:
        uuid (str, optional): UUID for the run; generated when not given
        name (str, optional): name of the experiment
        python_file (str, optional): path of the Python file that
            created the experiment
        comment (str, optional): a short description of the experiment
        writers (Set[str], optional): list of writers to write stats to.
            Defaults to ``{'screen', 'sqlite', 'tensorboard', 'web_api'}``.
        ignore_callers (Set[str], optional): list of files to ignore when
            automatically determining ``python_file``
        tags (Set[str], optional): set of tags for the experiment
        disable_screen (bool, optional): drop the ``'screen'`` writer
    """
    if writers is None:
        writers = {'screen', 'sqlite', 'tensorboard', 'web_api'}
    else:
        # Work on a copy: the original mutated the caller's set when
        # disable_screen was requested.
        writers = set(writers)
    if disable_screen:
        writers.discard('screen')
    if ignore_callers is None:
        ignore_callers = set()
    if uuid is None:
        uuid = generate_uuid()
    monitor().clear()
    _create_experiment(uuid=uuid,
                       name=name,
                       python_file=python_file,
                       comment=comment,
                       writers=writers,
                       ignore_callers=ignore_callers,
                       tags=tags,
                       is_evaluate=False)
def evaluate():
    r"""
    Prepare an evaluation-only run of a saved experiment.

    Nothing is recorded: no writers are attached.
    """
    monitor().clear()
    _create_experiment(uuid=generate_uuid(), name=None, python_file=None,
                       comment=None, writers=set(), ignore_callers=set(),
                       tags=None, is_evaluate=True)
def distributed(rank: int, world_size: int):
    """Forward the distributed-run rank and world size to the experiment."""
    experiment = _experiment_singleton()
    experiment.distributed(rank, world_size)
def add_model_savers(savers: Dict[str, ModelSaver]):
    """Register extra model savers on the experiment's checkpoint saver."""
    checkpoint_saver = _experiment_singleton().checkpoint_saver
    checkpoint_saver.add_savers(savers)
@overload
def add_pytorch_models(**kwargs: 'torch.nn.Module'):
    ...


@overload
def add_pytorch_models(models: Dict[str, 'torch.nn.Module']):
    ...


def add_pytorch_models(*args, **kwargs):
    """
    Register torch modules for saving and loading.

    Accepts either a single ``Dict[str, torch.nn.Module]`` positional
    argument or the modules as keyword arguments. They are saved with
    :func:`labml.experiment.save_checkpoint` and loaded with
    :func:`labml.experiment.load`.
    """
    from labml.internal.experiment.pytorch import add_models as _add_pytorch_models
    if not args:
        _add_pytorch_models(kwargs)
        return
    assert len(args) == 1
    assert isinstance(args[0], dict)
    _add_pytorch_models(args[0])
def add_sklearn_models(models: Dict[str, any]):
    """
    .. warning::
        This is still experimental.

    Register SKLearn models for saving with
    :func:`labml.experiment.save_checkpoint` and loading with
    :func:`labml.experiment.load`.
    """
    from labml.internal.experiment.sklearn import add_models as _add
    _add(models)
@overload
def configs(conf_dict: Dict[str, any]):
    ...


@overload
def configs(conf_dict: Dict[str, any], conf_override: Dict[str, any]):
    ...


@overload
def configs(conf: BaseConfigs):
    ...


@overload
def configs(conf: BaseConfigs, conf_override: Dict[str, any]):
    ...


def configs(*args):
    r"""
    Calculate configurations.

    Accepts either a :class:`labml.configs.BaseConfigs` instance or a
    plain ``dict`` of configurations, optionally followed by a ``dict``
    of values to override.

    Arguments:
        conf (BaseConfigs or Dict[str, any]): configurations
        conf_override (Dict[str, any], optional): configurations to override

    Raises:
        RuntimeError: if the arguments match none of the overloads
    """
    if not args:
        # Previously this raised a bare IndexError on args[0]; fail with
        # the documented error instead.
        raise RuntimeError("Invalid call to calculate configs")
    conf = args[0]
    conf_override: Optional[Dict[str, any]] = None
    idx = 1
    if idx < len(args) and isinstance(args[idx], dict):
        conf_override = args[idx]
        idx += 1
    if len(args) != idx:
        raise RuntimeError("Invalid call to calculate configs")
    # BaseConfigs instances and plain dicts take exactly the same path,
    # so the two previous duplicate branches are merged.
    if not isinstance(conf, (BaseConfigs, dict)):
        raise RuntimeError("Invalid call to calculate configs")
    _experiment_singleton().calc_configs(conf, conf_override)
_load_run_uuid: Optional[str] = None
_load_checkpoint: Optional[int] = None
def start():
    r"""
    Start the experiment.

    Use it with a ``with`` statement so completion and exceptions are
    monitored and reported.
    """
    # Reading module globals does not require ``global`` declarations.
    return _experiment_singleton().start(run_uuid=_load_run_uuid,
                                         checkpoint=_load_checkpoint)
def load_configs(run_uuid: str, *, is_only_hyperparam: bool = True):
    r"""
    Load the saved configurations of a previous run.

    Arguments:
        run_uuid (str): UUID of the run whose configurations are loaded

    Keyword Arguments:
        is_only_hyperparam (bool, optional): when True, only hyper-parameters
            (and explicitly specified values) are returned

    Returns:
        a ``dict`` mapping configuration names to their saved values
    """
    saved = _get_configs(run_uuid)
    if not saved:
        return {}
    values = {}
    for key, info in saved.items():
        is_hyperparam = info.get('is_hyperparam', None)
        is_explicit = info.get('is_explicitly_specified', False)
        # Keep everything when not filtering; otherwise keep declared
        # hyper-parameters and undeclared-but-explicit values.
        keep = (not is_only_hyperparam
                or is_hyperparam
                or (is_hyperparam is None and is_explicit))
        if keep:
            values[key] = info['value']
    return values
def load(run_uuid: str, checkpoint: Optional[int] = None):
    r"""
    Arrange for the experiment to resume from a previous checkpoint.

    You still need to call :func:`labml.experiment.start` separately to
    actually start the experiment.

    Arguments:
        run_uuid (str): UUID of the run whose saved state should be restored
        checkpoint (int, optional): checkpoint to resume from; the last
            checkpoint is used when omitted
    """
    global _load_run_uuid, _load_checkpoint
    _load_run_uuid = run_uuid
    _load_checkpoint = checkpoint
def load_models(models: List[str], run_uuid: str, checkpoint: Optional[int] = None):
    r"""
    Load the named models from a previous run's checkpoint.

    Arguments:
        models (List[str]): names of the models to load
        run_uuid (str): UUID of the run whose saved state should be used
        checkpoint (int, optional): checkpoint to load from; defaults to
            the last checkpoint
    """
    experiment = _experiment_singleton()
    experiment.load_models(models=models, run_uuid=run_uuid, checkpoint=checkpoint)
def save_numpy(name: str, array: np.ndarray):
    r"""
    Saves a single numpy array to the run's ``numpy_path`` as ``<name>.npy``.
    This is used to save processed data.

    Arguments:
        name (str): file name, without the ``.npy`` extension
        array (np.ndarray): array to save
    """
    numpy_path = Path(_experiment_singleton().run.numpy_path)
    # exist_ok avoids the race between a separate exists() check and mkdir()
    # when several processes save at the same time.
    numpy_path.mkdir(parents=True, exist_ok=True)
    file_name = name + ".npy"
    np.save(str(numpy_path / file_name), array)
def record(*,
           name: Optional[str] = None,
           comment: Optional[str] = None,
           writers: Optional[Set[str]] = None,
           tags: Optional[Set[str]] = None,
           exp_conf: Optional[Dict[str, any]] = None,
           lab_conf: Optional[Dict[str, any]] = None,
           token: Optional[str] = None,
           disable_screen: bool = False):
    r"""
    This combines :func:`create`, :func:`configs` and :func:`start`.

    Keyword Arguments:
        name (str, optional): name of the experiment
        comment (str, optional): a short description of the experiment
        writers (Set[str], optional): list of writers to write stat to.
            Defaults to ``{'tensorboard', 'sqlite', 'web_api'}``.
        tags (Set[str], optional): Set of tags for experiment
        exp_conf (Dict[str, any], optional): a dictionary of experiment configurations
        lab_conf (Dict[str, any], optional): a dictionary of configurations for LabML.
            Use this if you want to change default configurations such as ``web_api``, and
            ``data_path``.
        token (str, optional): a shortcut to provide LabML mobile app token (or url - ``web_api``)
            instead of including it in ``lab_conf``. You can set this with :func:`labml.lab.configure`,
            `or with a configuration file for the entire project <../guide/installation_setup.html>`_.
        disable_screen (bool, optional): passed through to :func:`create`
    """
    # A bare token is shorthand for lab_conf={'web_api': token}; note that a
    # non-None token overwrites any 'web_api' already present in lab_conf.
    if token is not None:
        if lab_conf is None:
            lab_conf = {}
        lab_conf['web_api'] = token
    if lab_conf is not None:
        # Imported here rather than at module top — presumably to avoid an
        # import cycle at module load time; TODO confirm.
        from labml.internal.lab import lab_singleton as _internal
        _internal().set_configurations(lab_conf)
    create(name=name,
           python_file=None,
           comment=comment,
           writers=writers,
           ignore_callers=None,
           tags=tags,
           disable_screen=disable_screen)
    if exp_conf is not None:
        configs(exp_conf)
    return start()
| 29.679144 | 115 | 0.633784 |
acebc61735051c3dc6201c7e20a0464c55cb642d | 10,695 | py | Python | otp/namepanel/NameCheck.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | otp/namepanel/NameCheck.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | otp/namepanel/NameCheck.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | import string
from otp.otpbase import OTPLocalizer
from direct.directnotify import DirectNotifyGlobal
from panda3d.core import NSError, TextEncoder, TextNode
notify = DirectNotifyGlobal.directNotify.newCategory('NameCheck')
def filterString(str, filter):
    """Return the characters of `str` that appear in `filter`, in order."""
    # Parameter names are kept for caller compatibility even though they
    # shadow the builtins of the same name.
    return ''.join(char for char in str if char in filter)
def justLetters(str):
    """Return only the alphabetic characters of `str`."""
    return ''.join(ch for ch in str if ch.isalpha())
def justUpper(str):
    """Return the cased characters of `str` that are upper-case.

    A character counts as cased only when its upper- and lower-case forms
    differ, so digits and punctuation are skipped entirely.
    """
    return ''.join(ch for ch in str if ch.upper() != ch.lower() and ch == ch.upper())
def wordList(str):
    """Split `str` on whitespace and dashes, dropping empty fragments."""
    return [part
            for word in str.split()
            for part in word.split('-')
            if part]
def checkName(name, otherCheckFuncs=None, font=None):
    """Validate a proposed name against a battery of rule checks.

    Returns a localized rejection string (from OTPLocalizer) describing the
    first rule the name violates, or None when the name passes all checks.

    Arguments:
        name: raw (encoded) name string to validate
        otherCheckFuncs: optional extra check callables; each takes the
            decoded name and returns a problem string or None
            (default changed from a shared mutable ``[]`` to ``None``)
        font: optional font used to verify every glyph is renderable
    """

    def longEnough(name):
        if len(name) < 2:
            notify.info('name is too short')
            return OTPLocalizer.NCTooShort

    def emptyName(name):
        if name.strip() == '':
            notify.info('name is empty')
            return OTPLocalizer.NCTooShort

    def printableChars(name):
        # Only polices the ASCII range; wide characters are handled elsewhere.
        for char in name:
            if ord(char) < 128 and char not in string.printable:
                notify.info('name contains non-printable char #%s' % ord(char))
                return OTPLocalizer.NCGeneric

    validAsciiChars = set(".,'-" + string.letters + string.whitespace)

    def _validCharacter(c, validAsciiChars=validAsciiChars, font=font):
        if c in validAsciiChars:
            return True
        if c.isalpha() or c.isspace():
            return True
        return False

    def badCharacters(name, _validCharacter=_validCharacter):
        for char in name:
            if not _validCharacter(char):
                if char in string.digits:
                    notify.info('name contains digits')
                    return OTPLocalizer.NCNoDigits
                else:
                    notify.info('name contains bad char: %s' % TextEncoder().encodeWtext(char))
                    return OTPLocalizer.NCBadCharacter % TextEncoder().encodeWtext(char)

    def fontHasCharacters(name, font=font):
        # Only meaningful when a font was supplied.
        if font:
            tn = TextNode('NameCheck')
            tn.setFont(font)
            for c in name:
                if not tn.hasCharacter(unicode(c)):
                    notify.info('name contains bad char: %s' % TextEncoder().encodeWtext(c))
                    return OTPLocalizer.NCBadCharacter % TextEncoder().encodeWtext(c)

    def hasLetters(name):
        # Every dash/space-separated word must contain at least one letter.
        words = wordList(name)
        for word in words:
            letters = justLetters(word)
            if len(letters) == 0:
                notify.info('word "%s" has no letters' % TextEncoder().encodeWtext(word))
                return OTPLocalizer.NCNeedLetters

    def hasVowels(name):
        def perWord(word):
            # Skip abbreviations ("J.T.") and any word with non-ASCII chars.
            if '.' in word:
                return None
            for char in word:
                if ord(char) >= 128:
                    return None
            letters = filterString(word, string.letters)
            if len(letters) > 2:
                vowels = filterString(letters, 'aeiouyAEIOUY')
                if len(vowels) == 0:
                    notify.info('word "%s" has no vowels' % TextEncoder().encodeWtext(word))
                    return OTPLocalizer.NCNeedVowels
            return None

        for word in wordList(name):
            problem = perWord(word)
            if problem:
                return problem

    def monoLetter(name):
        def perWord(word):
            letters = justLetters(word)
            if len(letters) > 2:
                letters = TextEncoder().decodeText(TextEncoder.lower(TextEncoder().encodeWtext(letters)))
                # If every letter equals the first one, the word is one letter
                # repeated (e.g. "aaa").
                filtered = filterString(letters, letters[0])
                if filtered == letters:
                    notify.info('word "%s" uses only one letter' % TextEncoder().encodeWtext(word))
                    return OTPLocalizer.NCGeneric

        for word in wordList(name):
            problem = perWord(word)
            if problem:
                return problem

    def checkDashes(name):
        def validDash(index, name=name):
            # A dash must be surrounded by letters and cannot start or end
            # the name. (Fixed: this helper previously read the loop variable
            # ``i`` from the enclosing scope instead of its ``index`` param.)
            if index == 0 or index == len(name) - 1:
                return 0
            if not name[index - 1].isalpha():
                return 0
            if not name[index + 1].isalpha():
                return 0
            return 1

        i = 0
        while 1:
            i = name.find('-', i, len(name))
            if i < 0:
                return None
            if not validDash(i):
                notify.info('name makes invalid use of dashes')
                return OTPLocalizer.NCDashUsage
            i += 1

    def checkCommas(name):
        def validComma(index, name=name):
            # A comma must follow a word directly and be followed by a space.
            # (Fixed: same ``i``-vs-``index`` issue as validDash above.)
            if index == 0 or index == len(name) - 1:
                return OTPLocalizer.NCCommaEdge
            if name[index - 1].isspace():
                return OTPLocalizer.NCCommaAfterWord
            if not name[index + 1].isspace():
                return OTPLocalizer.NCCommaUsage
            return None

        i = 0
        while 1:
            i = name.find(',', i, len(name))
            if i < 0:
                return None
            problem = validComma(i)
            if problem:
                notify.info('name makes invalid use of commas')
                return problem
            i += 1

    def checkPeriods(name):
        # Words containing periods must end in one, carry at most two, and a
        # two-period word must match the "J.T." initials pattern.
        words = wordList(name)
        for word in words:
            if word[-1] == ',':
                word = word[:-1]
            numPeriods = word.count('.')
            if not numPeriods:
                continue
            if word[-1] != '.':
                notify.info('word "%s" does not end in a period' % TextEncoder().encodeWtext(word))
                return OTPLocalizer.NCPeriodUsage
            if numPeriods > 2:
                notify.info('word "%s" has too many periods' % TextEncoder().encodeWtext(word))
                return OTPLocalizer.NCPeriodUsage
            if numPeriods == 2:
                if not (word[1] == '.' and word[3] == '.'):
                    notify.info('word "%s" does not fit the J.T. pattern' % TextEncoder().encodeWtext(word))
                    return OTPLocalizer.NCPeriodUsage
        return None

    def checkApostrophes(name):
        # At most two apostrophes per word, three in the whole name.
        words = wordList(name)
        for word in words:
            numApos = word.count("'")
            if numApos > 2:
                notify.info('word "%s" has too many apostrophes.' % TextEncoder().encodeWtext(word))
                return OTPLocalizer.NCApostrophes
        numApos = name.count("'")
        if numApos > 3:
            notify.info('name has too many apostrophes.')
            return OTPLocalizer.NCApostrophes

    def tooManyWords(name):
        if len(wordList(name)) > 4:
            notify.info('name has too many words')
            return OTPLocalizer.NCTooManyWords

    def allCaps(name):
        letters = justLetters(name)
        if len(letters) > 2:
            upperLetters = TextEncoder().decodeText(TextEncoder.upper(TextEncoder().encodeWtext(letters)))
            # Skip the check when the first character has no upper-case form
            # (e.g. non-cased scripts). NOTE(review): the original looped over
            # every index but always tested index 0, which reduces to this
            # single test; it may have intended a per-character test — confirm.
            if not upperLetters[0].isupper():
                return
            if upperLetters == letters:
                notify.info('name is all caps')
                return OTPLocalizer.NCAllCaps

    def mixedCase(name):
        # More than two capitals in a word (e.g. "McDoNald") is rejected.
        words = wordList(name)
        for word in words:
            if len(word) > 2:
                capitals = justUpper(word)
                if len(capitals) > 2:
                    notify.info('name has mixed case')
                    return OTPLocalizer.NCMixedCase

    def checkJapanese(name):
        # NOTE(review): defined but not included in ``checks`` below; kept for
        # parity with the original source.
        asciiSpace = range(32, 33)
        asciiDigits = range(48, 64)
        hiragana = range(12353, 12448)
        katakana = range(12449, 12544)
        halfwidthKatakana = range(65381, 65440)
        halfwidthCharacter = set(asciiSpace + halfwidthKatakana)
        allowedUtf8 = set(asciiSpace + hiragana + katakana + halfwidthKatakana)
        te = TextEncoder()
        dc = 0.0
        for char in (ord(char) for char in te.decodeText(name)):
            if char not in allowedUtf8:
                if char in asciiDigits:
                    notify.info('name contains not allowed ascii digits')
                    return OTPLocalizer.NCNoDigits
                else:
                    notify.info('name contains not allowed utf8 char: 0x%04x' % char)
                    return OTPLocalizer.NCBadCharacter % te.encodeWtext(unichr(char))
            elif char in halfwidthCharacter:
                # Half-width characters occupy half a display cell.
                dc += 0.5
            else:
                dc += 1

        if dc < 2:
            notify.info('name is too short: %0.1f' % dc)
            return OTPLocalizer.NCTooShort
        elif dc > 8:
            notify.info('name has been occupied more than eight display cells: %0.1f' % dc)
            return OTPLocalizer.NCGeneric

    def repeatedChars(name):
        # No character may appear more than twice in a row.
        count = 1
        lastChar = None
        i = 0
        while i < len(name):
            char = name[i]
            i += 1
            if char == lastChar:
                count += 1
            else:
                count = 1
                lastChar = char
            if count > 2:
                notify.info('character %s is repeated too many times' % TextEncoder().encodeWtext(char))
                return OTPLocalizer.NCRepeatedChar % TextEncoder().encodeWtext(char)
        return

    checks = [printableChars,
              badCharacters,
              fontHasCharacters,
              longEnough,
              emptyName,
              hasLetters,
              hasVowels,
              monoLetter,
              checkDashes,
              checkCommas,
              checkPeriods,
              checkApostrophes,
              tooManyWords,
              allCaps,
              mixedCase,
              repeatedChars] + list(otherCheckFuncs or [])
    # Checks that should also run against the reversed name; currently none.
    symmetricChecks = []
    name = TextEncoder().decodeText(name)
    notify.info('checking name "%s"...' % TextEncoder().encodeWtext(name))
    for check in checks:
        problem = check(name[:])
        if not problem and check in symmetricChecks:
            # Fixed: the original referenced an undefined variable (``bName``)
            # and called list-style ``.reverse()`` on a string; reverse via
            # slicing instead. (A leftover Python-2 debug ``print`` of each
            # result was also removed here.)
            problem = check(name[::-1])
        if problem:
            return problem
    return None
# NOTE(review): import-time leftovers — the two loops below do nothing, and
# notify's severity is raised (to NSError) around them, then restored.
# Looks like remnants of a character/coverage experiment; confirm before
# removing.
severity = notify.getSeverity()
notify.setSeverity(NSError)
for i in xrange(32):
    pass
for c in '!"#$%&()*+/:;<=>?@[\\]^_`{|}~':
    pass
notify.setSeverity(severity)
del severity
| 31.925373 | 108 | 0.540252 |
acebc61d93ceeede126492a0e195bd0744584814 | 2,452 | py | Python | models/Unet/unet_parts.py | Minerva-J/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 84 | 2020-06-10T11:50:10.000Z | 2022-03-28T15:24:27.000Z | models/Unet/unet_parts.py | eeaesa/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 4 | 2020-08-10T07:22:08.000Z | 2022-01-28T01:58:25.000Z | models/Unet/unet_parts.py | eeaesa/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 28 | 2020-07-02T11:04:56.000Z | 2022-02-16T13:57:20.000Z | # sub-parts of the U-Net model
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine do not have enough memory to handle all those weights
# if bilinear:
# self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
# self.up = nn.functional.interpolate(scale_factor=(2, 2), mode='bilinear', align_corners=True)
# else:
# self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
# x1 = self.up(x1)
x1 = nn.functional.interpolate(x1, scale_factor=(2, 2), mode='bilinear', align_corners=True)
# print('0', x1.shape)
# input is CHW
# diffY = x2.size()[2] - x1.size()[2]
# diffX = x2.size()[3] - x1.size()[3]
x = torch.cat([x2, x1], dim=1)
# print('2', x.shape)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
# self.BN = nn.BatchNorm2d(out_ch)
def forward(self, x):
x = self.conv(x)
# x = self.BN(x)
return x
| 28.183908 | 107 | 0.567292 |
acebc7442c2d533d31cc52ccdd10fc928c5bf929 | 2,650 | py | Python | haystack/management/commands/build_solr_schema.py | disqus/django-haystack | ca46ed3ae699ab0326935829174b246e46d4b381 | [
"BSD-3-Clause"
] | 1 | 2017-10-16T01:53:26.000Z | 2017-10-16T01:53:26.000Z | haystack/management/commands/build_solr_schema.py | disqus/django-haystack | ca46ed3ae699ab0326935829174b246e46d4b381 | [
"BSD-3-Clause"
] | null | null | null | haystack/management/commands/build_solr_schema.py | disqus/django-haystack | ca46ed3ae699ab0326935829174b246e46d4b381 | [
"BSD-3-Clause"
] | null | null | null | from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from django.template import loader, Context
from haystack.constants import ID, DJANGO_CT, DJANGO_ID, DEFAULT_OPERATOR
try:
from django.utils import importlib
except ImportError:
from haystack.utils import importlib
class Command(BaseCommand):
    """Management command that renders a Solr ``schema.xml`` reflecting the
    registered Haystack search indexes.

    Output goes to stdout by default, or to a file via ``--filename``.
    """
    help = "Generates a Solr schema that reflects the indexes."
    base_options = (
        make_option("-f", "--filename", action="store", type="string", dest="filename",
            help='If provided, directs output to a file instead of stdout.',
        ),
        make_option("-s", "--site", action="store", type="string", dest="site",
            help='If provided, configures Haystack to use the appropriate site module. (Defaults to `haystack.site`)',
        ),
    )
    option_list = BaseCommand.option_list + base_options

    def handle(self, **options):
        """Generates a Solr schema that reflects the indexes."""
        if options.get('site'):
            # --site is a dotted path, e.g. "myproject.search_sites.mysite".
            mod_name, attr_name = options['site'].rsplit('.', 1)
            self.site = getattr(importlib.import_module(mod_name), attr_name)
        else:
            from haystack import site
            self.site = site

        schema_xml = self.build_template()

        if options.get('filename'):
            self.write_file(options.get('filename'), schema_xml)
        else:
            self.print_stdout(schema_xml)

    def build_context(self):
        """Build the template context from the backend's computed schema."""
        # Cause the default site to load.
        content_field_name, fields = self.site.backend.build_schema(self.site.all_searchfields())
        return Context({
            'content_field_name': content_field_name,
            'fields': fields,
            'default_operator': DEFAULT_OPERATOR,
            'ID': ID,
            'DJANGO_CT': DJANGO_CT,
            'DJANGO_ID': DJANGO_ID,
        })

    def build_template(self):
        """Render the Solr schema template with the generated context."""
        t = loader.get_template('search_configuration/solr.xml')
        c = self.build_context()
        return t.render(c)

    def print_stdout(self, schema_xml):
        """Write usage hints to stderr and the schema itself to stdout."""
        sys.stderr.write("\n")
        sys.stderr.write("\n")
        sys.stderr.write("\n")
        sys.stderr.write("Save the following output to 'schema.xml' and place it in your Solr configuration directory.\n")
        sys.stderr.write("--------------------------------------------------------------------------------------------\n")
        sys.stderr.write("\n")
        # Parenthesized single-argument print behaves identically on
        # Python 2 and 3 (fixed from the py2-only statement form).
        print(schema_xml)

    def write_file(self, filename, schema_xml):
        """Write the schema to `filename`.

        Fixed: uses a context manager so the file handle is closed even when
        the write raises.
        """
        with open(filename, 'w') as schema_file:
            schema_file.write(schema_xml)
| 38.405797 | 122 | 0.606792 |
acebc76ea36c9d159adfb89983ed518fedd2cbd5 | 6,547 | py | Python | forms/forms.py | alok102singh/courtside | 6d427391c543cc602ae2d92e1aa61ea15721645b | [
"MIT"
] | 49 | 2015-02-07T02:36:42.000Z | 2021-03-26T10:22:06.000Z | forms/forms.py | besartaliju/courtside | 6d427391c543cc602ae2d92e1aa61ea15721645b | [
"MIT"
] | 3 | 2015-02-07T02:37:41.000Z | 2016-12-09T19:53:13.000Z | forms/forms.py | besartaliju/courtside | 6d427391c543cc602ae2d92e1aa61ea15721645b | [
"MIT"
] | 24 | 2015-01-23T12:47:16.000Z | 2021-01-27T00:18:22.000Z | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from localflavor.us.forms import USPhoneNumberField
from register.models import Sport, GENDER_CHOICES
class PlayerForm(forms.Form):
    """Player details form for an existing user.

    Validates that the username and email address are not already taken.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    first_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    last_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    email = forms.EmailField()
    sports = forms.ModelMultipleChoiceField(queryset=Sport.objects.all(),
                                            widget=forms.CheckboxSelectMultiple(attrs={'class':'input_text'}),
                                            label='Sports')
    gender = forms.ChoiceField(choices=GENDER_CHOICES, widget=forms.Select,
                               label='Gender', help_text='optional', required=False)
    phone_number = USPhoneNumberField(help_text='optional', required=False)

    def clean_email(self):
        """Reject an email address that already belongs to a user."""
        email = self.cleaned_data['email']
        # .exists() issues a cheap EXISTS query instead of fetching every row
        # just to take len() of the queryset.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError('Email has been activated')
        return email

    def clean_username(self):
        """Reject a username that is already taken."""
        username = self.cleaned_data['username']
        if User.objects.filter(username=username).exists():
            raise forms.ValidationError('username has been already taken')
        return username
class NewPlayerForm(forms.Form):
    """Sign-up form for a brand-new player account.

    Validates password confirmation and uniqueness of username/email.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    first_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    last_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'input_text'}), label='Password')
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'input_text'}), label='Re-type Password')
    email = forms.EmailField(widget=forms.TextInput(attrs={'class':'input_text'}))
    sports = forms.ModelMultipleChoiceField(queryset=Sport.objects.all(),
                                            widget=forms.CheckboxSelectMultiple(attrs={}),
                                            label='Sports')
    gender = forms.ChoiceField(choices=GENDER_CHOICES,
                               widget=forms.Select(attrs={'class':'input_text'}),
                               label='Gender', help_text='optional', required=False)
    phone_number = USPhoneNumberField(widget=forms.TextInput(attrs={'class':'input_text'}), help_text='optional', required=False)

    def clean_password2(self):
        """Ensure the two password entries match."""
        # .get() for password1: if it failed its own validation it is absent
        # from cleaned_data, and direct indexing would raise KeyError (a 500)
        # instead of a form error.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data['password2']
        if password2 != password1:
            raise forms.ValidationError('passwords do not match!')
        return password2

    def clean_email(self):
        """Reject an email address that already belongs to a user."""
        email = self.cleaned_data['email']
        # .exists() avoids loading every matching row just to count them.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError('Email has been activated')
        return email

    def clean_username(self):
        """Reject a username that is already taken."""
        username = self.cleaned_data['username']
        if User.objects.filter(username=username).exists():
            raise forms.ValidationError('username has been already taken')
        return username
class GameForm(forms.Form):
    """Form for scheduling a pickup game (sport, time, place, constraints)."""
    sport = forms.ModelChoiceField(queryset=Sport.objects.all(), widget=forms.Select, label='Sports')
    start_date = forms.DateField()
    start_time = forms.TimeField()
    address = forms.CharField(max_length=50)
    minimum_players = forms.IntegerField()
    restrictions = forms.CharField(max_length=200, help_text='eg., 3 on 3, Women Only', required=False)
class ProfileForm(forms.Form):
    """Profile-editing form for an existing user (declares no password fields)."""
    first_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    last_name = forms.CharField(widget=forms.TextInput(attrs={'class':'input_text'}), max_length=50)
    email = forms.EmailField()
    sports = forms.ModelMultipleChoiceField(queryset=Sport.objects.all(),
                                            widget=forms.CheckboxSelectMultiple,
                                            label='Sports')
    gender = forms.ChoiceField(choices=GENDER_CHOICES,
                               widget=forms.Select,
                               label='Gender', help_text='optional', required=False)
    phone_number = USPhoneNumberField(help_text='optional', required=False)

    def clean_password2(self):
        # NOTE(review): this form declares no password1/password2 fields, so
        # this clean hook is never invoked by Django — it looks like dead code
        # copied from NewPlayerForm; confirm before removing.
        if self.cleaned_data['password2'] != self.cleaned_data['password1']:
            raise forms.ValidationError('passwords do not match!')
        return self.cleaned_data['password2']

    def clean_email(self):
        # NOTE(review): uses `> 1` where the other forms use `> 0` —
        # presumably to let a user keep their own existing address while
        # editing; verify this matches the intended uniqueness rule.
        email = self.cleaned_data['email']
        user = User.objects.filter(email=email)
        if len(user) > 1:
            raise forms.ValidationError('Email has been activated')
        return self.cleaned_data['email']
class PasswordForm(forms.Form):
    """Standalone password-change form: two entries that must match."""
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'input_text'}), label='Password')
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'input_text'}), label='Re-type Password')

    def clean_password2(self):
        """Reject the submission when the two password entries differ."""
        first = self.cleaned_data['password1']
        second = self.cleaned_data['password2']
        if second != first:
            raise forms.ValidationError('passwords do not match!')
        return second
class LoginForm(forms.Form):
    """Email + password login form."""
    email = forms.EmailField(widget=forms.TextInput(attrs={'class':'input_text', 'placeholder':'Your Email'}), label='Email')
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'input_text', 'placeholder':'Password'}), label='Password')

    def clean_password(self):
        """Authenticate the email/password pair.

        Raises the same generic error for every failure mode so the form does
        not reveal whether the email is registered.
        """
        # email is absent from cleaned_data when its own validation failed;
        # the original used a bare `except:` around the lookup.
        email = self.cleaned_data.get('email')
        if email is None:
            raise forms.ValidationError('Invalid username password combo!')
        password = self.cleaned_data['password']
        try:
            user = User.objects.get(email=email)
        except (User.DoesNotExist, User.MultipleObjectsReturned):
            # Narrowed from a bare `except:` — these are the lookup failures
            # the original could legitimately hit here.
            raise forms.ValidationError('Invalid username password combo!')
        user = authenticate(username=user.username, password=password)
        if user is None:
            raise forms.ValidationError('Invalid username password combo!')
        return self.cleaned_data['password']
| 49.598485 | 132 | 0.665496 |
acebc96c8fdbe31988ef9cf2e207c0fd5fd11f66 | 4,264 | py | Python | train/generator_only.py | nhjeong/Subsampling_aliasing_artifact_eliminator | 6a009ef5620bc3f541441ae688c44eac7670913b | [
"MIT"
] | 1 | 2019-01-25T13:31:07.000Z | 2019-01-25T13:31:07.000Z | train/generator_only.py | nhjeong/Subsampling_aliasing_artifact_eliminator | 6a009ef5620bc3f541441ae688c44eac7670913b | [
"MIT"
] | null | null | null | train/generator_only.py | nhjeong/Subsampling_aliasing_artifact_eliminator | 6a009ef5620bc3f541441ae688c44eac7670913b | [
"MIT"
] | 1 | 2018-12-05T05:00:43.000Z | 2018-12-05T05:00:43.000Z | import os
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import h5py
# % matplotlib inline # When drawing a plot for something at iPython environment
# Device configuration
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:2" if use_cuda else "cpu")  # Assigning 0~3 (4 GPUs for 1 workstation)

# Hyper-parameters
latent_size = 5184        # NOTE(review): not referenced in this script — confirm before removing
hidden_size_G = 1728      # generator hidden width (channels of the 1x1 convs)
hidden_size_D = 108       # NOTE(review): discriminator width, unused in this generator-only script
image_size = 216          # spatial height of each slice
in_channel = 24  # 12-channel and real-imaginary
num_epochs = 500
batch_size = 5
test_total_slices = 13824  # NOTE(review): unused in this file
lr = 1e-4

sample_dir = '/home/nhjeong/MLPGAN/db'

# Create a directory if not exists
if not os.path.exists(sample_dir):
    os.makedirs(sample_dir)

# Generator: first conv collapses the full height with a (image_size, 1)
# kernel, then 1x1 convolutions refine channel-wise.
G = nn.Sequential(
    nn.Conv2d(in_channel, hidden_size_G, kernel_size=(image_size, 1)),  # in_channels, out_channels, kernel_size
    nn.ReLU(),
    nn.Conv2d(hidden_size_G, hidden_size_G, 1),  # 1x1 convolution
    nn.ReLU(),
    nn.Conv2d(hidden_size_G, hidden_size_G, 1),
    nn.ReLU(),
    nn.Conv2d(hidden_size_G, image_size, 1))

# Device setting
G = G.to(device)

# MSE, Binary cross entropy loss and optimizer
baseloss = nn.MSELoss()
g_optimizer = torch.optim.Adam(G.parameters(), lr=lr)
def reset_grad():
    """Zero the generator optimizer's gradients before the next backprop."""
    g_optimizer.zero_grad()
class MyDataset(torch.utils.data.Dataset):
    """Dataset backed by MATLAB v7.3 (HDF5) .mat files.

    The entire train or test split is copied into memory in ``__init__``
    (the ``[:]`` slice materializes the HDF5 dataset as a numpy array), so
    ``__getitem__`` is a plain in-memory index.
    """

    def __init__(self, train=True):
        # train=True loads the training file pair, otherwise the test pair.
        self.train = train
        if self.train:
            self.train_X_mat = h5py.File('/home/nhjeong/MLPGAN/db/db_gan2.mat', 'r')
            self.train_X_input = self.train_X_mat['db'][:]
            self.train_Y_mat = h5py.File('/home/nhjeong/MLPGAN/db/gt_gan2.mat', 'r')
            self.train_Y_input = self.train_Y_mat['gt'][:]
            # Safe to close: the arrays above are already in-memory copies.
            self.train_X_mat.close()
            self.train_Y_mat.close()
        else:
            self.test_X_mat = h5py.File('/home/nhjeong/MLPGAN/db/test_db_gan2.mat', 'r')
            self.test_X_input = self.test_X_mat['test_db'][:]
            self.test_Y_mat = h5py.File('/home/nhjeong/MLPGAN/db/test_gt_gan2.mat', 'r')
            self.test_Y_input = self.test_Y_mat['test_gt'][:]
            self.test_X_mat.close()
            self.test_Y_mat.close()

    def __len__(self):
        """Number of slices in the selected split."""
        if self.train:
            return self.train_X_input.shape[0]
        else:
            return self.test_X_input.shape[0]

    def __getitem__(self, index):
        """Return the (input, target) slice pair at `index`."""
        if self.train:
            raw, target = self.train_X_input[index,], self.train_Y_input[index,]
        else:
            raw, target = self.test_X_input[index,], self.test_Y_input[index,]
        return raw, target
trainset = MyDataset(train=True)
testset = MyDataset(train=False)
trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True)

train_loss = []  # per-iteration generator MSE history

# Start training
total_step = len(trainloader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(trainloader):
        # (B, H, W, C) -> (B, C, H, W) for Conv2d.
        images = images.permute(0, 3, 1, 2)
        labels = labels.view((batch_size, 1, 216, 384))

        # Shift augmentation: roll input and target by the same random
        # offset (up to 6 pixels, either direction) along axis 2.
        index = np.random.randint(-6, 7)
        images = np.roll(images, index, axis=2)
        labels = np.roll(labels, index, axis=2)
        images = torch.from_numpy(images).to(device)
        labels = torch.from_numpy(labels).to(device)

        # ================================================================== #
        #                        Train the generator                         #
        # ================================================================== #

        fake_images = G(images)
        labels = labels.permute(0, 2, 1, 3)

        base_loss = baseloss(fake_images, labels)
        # Fixed: `base_loss.data[0]` is the pre-0.4 PyTorch idiom and raises
        # on modern versions (0-dim tensor); `.item()` — already used for the
        # print below — is the supported accessor, and keeps the two call
        # sites consistent.
        train_loss.append(base_loss.item())

        # Backprop and optimize
        reset_grad()
        base_loss.backward()
        g_optimizer.step()

        if (i + 1) % 10 == 0:
            print('Epoch [{}/{}], Step [{}/{}], MSE: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, base_loss.item()))
| 31.124088 | 114 | 0.606238 |
acebc9f52d503f09f8857b32c8823c48f2150ee3 | 1,457 | py | Python | app/test/test2.py | sappachok/django-anaconda | 1ffd33ded759f622b6db23a3550a898b62350403 | [
"MIT"
] | null | null | null | app/test/test2.py | sappachok/django-anaconda | 1ffd33ded759f622b6db23a3550a898b62350403 | [
"MIT"
] | 7 | 2019-12-06T05:34:28.000Z | 2021-06-10T18:25:17.000Z | app/test/test2.py | sappachok/django-datasci | 1ffd33ded759f622b6db23a3550a898b62350403 | [
"MIT"
] | null | null | null | from subprocess import Popen, PIPE, STDOUT
import fcntl, os
import pty
import time
# Spawn an interactive Python child process to drive over pipes.
master, slave = pty.openpty()  # NOTE(review): `slave` is never wired to the child — confirm intent

proc = Popen(['python3', '-i'],
             stdin=PIPE,
             stdout=PIPE,
             stderr=PIPE,
             bufsize=1,  # line-buffered pipes
             #universal_newlines=True
             )
# proc.wait()
# tokenizer = subprocess.Popen(script, shell=True stdin=subprocess.PIPE, stdout=slave)
#fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
'''
fd = proc.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
'''
stdin_handle = proc.stdin
stdout_handle = os.fdopen(master)  # reader on the pty master side
def run_script(cmd, stdin, stdout):
    """Write `cmd` (bytes) to `stdin`, flush it, and echo one reply line."""
    stdin.write(cmd)
    stdin.flush()
    reply = stdout.readline()
    print(reply)
def run_script2(cmd, stdin, stdout):
    """Same contract as run_script: send `cmd`, then print one output line."""
    stdin.write(cmd)
    stdin.flush()
    line = stdout.readline()
    print(line)
# Feed a few expressions to the child REPL, echoing one output line each.
commands = ['2+2\n', 'len("foobar")\n', 'print("Hello")\n', 'a=1\n', 'a']
for cmd in commands:
    run_script(cmd.encode(), stdin_handle, proc.stdout)
    # run_script2(cmd.encode(), proc.stdin, proc.stdout)
    time.sleep(0.1)  # give the child time to respond before the next command

# stdin_handle.write(b'import json\n')
# stdin_handle.flush()
# print(proc.stdout.readline())
'''
print(proc.stdout.readline())
'''
# Shut the child down: close stdin to end the REPL, then terminate and reap.
proc.stdin.close()
proc.terminate()
proc.wait(timeout=0.2)
acebca0654b5f5af7e11006b62324861c481e771 | 205 | py | Python | logpuzzle/more_utilities_python.py | ubicu/google-python-exercises | b81b34caec0868d36511810df27c71d11cd9afd2 | [
"Apache-2.0"
] | null | null | null | logpuzzle/more_utilities_python.py | ubicu/google-python-exercises | b81b34caec0868d36511810df27c71d11cd9afd2 | [
"Apache-2.0"
] | null | null | null | logpuzzle/more_utilities_python.py | ubicu/google-python-exercises | b81b34caec0868d36511810df27c71d11cd9afd2 | [
"Apache-2.0"
] | null | null | null | # More exercises from Utilities lecture
import urllib
uf = urllib.urlopen('http://www.google.com')
# print(uf.read())
urllib.urlretrieve('http://www.google.com/intl/en_ALL/images/logo.gif','google.gif') | 25.625 | 84 | 0.741463 |
acebca9f8c523dbee420f1ebe1b0a951363c5e46 | 3,367 | py | Python | barts/main.py | lilydartdev/ppe-inventory | aaec9839fe324a3f96255756c15de45853bbb940 | [
"MIT"
] | 2 | 2020-10-06T11:33:02.000Z | 2021-10-10T13:10:12.000Z | barts/main.py | foundry4/ppe-inventory | 1ee782aeec5bd3cd0140480f9bf58396eb11403b | [
"MIT"
] | 1 | 2020-04-23T22:19:17.000Z | 2020-04-23T22:19:17.000Z | barts/main.py | foundry4/ppe-inventory | 1ee782aeec5bd3cd0140480f9bf58396eb11403b | [
"MIT"
] | 3 | 2020-05-26T11:41:40.000Z | 2020-06-29T08:53:34.000Z | from flask import request, make_response, redirect, render_template, abort, flash, url_for
from google.cloud import datastore
from google.cloud import pubsub_v1
import datetime
import json
import os
# Captured once at import time. NOTE(review): appears unused — the handlers
# below call datetime.datetime.now() directly; confirm before removing.
currentTime = datetime.datetime.now()
def barts(request):
    """HTTP handler for the Barts PPE inventory pages.

    Routing by query parameters:
      * ``landing=true``            -> site picker ('barts.html')
      * ``site=<name>&code=<code>`` -> per-site form ('form.html') on GET,
        or, on POST with a valid site, an update + 'success.html'
      * anything else               -> 'error.html'
    """
    # Hard-coded site-name -> access-code table rendered into the picker.
    sites = {'Royal London Hospital': 'pr234ted',
             'Whipps Cross Hospital': 'el324os',
             'St Bartholomew': 'ak907atp',
             'Mile End Hospital': 'ap193fw',
             'Newham Hospital': 'th738go',
             'Nightingale Hospital': 'b047a27a-e6be-4a03-8dc9-2123495a3c09'}
    post = False
    site = None
    landing = request.args.get('landing')
    name = request.args.get('site')
    code = request.args.get('code')
    client = datastore.Client()
    print(f'landing:{landing};name:{name};code:{code}')
    if name and code:
        site = get_site(name, code, client)
    if site and request.method == 'POST':
        print ("data are being updated.")
        update_site(site, client, request, code)
        publish_update(site)
        post = True
    if landing == 'true':
        print('Landing == true')
        template = 'barts.html'
    else:
        print('Landing != true')
        # NOTE(review): re-fetches the entity already loaded above — a second
        # Datastore read per request; confirm before consolidating.
        site = get_site(name, code, client)
        template = 'success.html' if post else 'form.html' if site else 'error.html'
    # Construct a full URL to redirect to
    # otherwise we seem to end up on http
    domain = os.getenv('DOMAIN')
    form_action = f'https://{domain}/barts?site={name}&code={code}'
    landing_page = f'https://{domain}/barts?landing=true'
    print(f"Rendering {template}")
    response = make_response(render_template(template,
                                             site=site,
                                             sites=sites,
                                             form_action=form_action,
                                             landingPage=landing_page,
                                             currentTime=datetime.datetime.now().strftime('%H:%M %d %B %y'),
                                             assets='https://storage.googleapis.com/ppe-inventory',
                                             data={}
                                             ))
    return response
def get_site(name, code, client):
    """Fetch the Datastore 'Site' entity for `name`.

    Returns the entity only when `code` matches its stored access code;
    otherwise returns None.
    """
    print(f"Getting site: {name}/{code}")
    entity = client.get(client.key('Site', name))
    if entity and entity.get('code') == code:
        return entity
    return None
def update_site(site, client, request, code):
    """Merge the submitted form into `site`, restore the server-controlled
    fields that user input must never overwrite, then persist the entity."""
    preserved_acute = site.get('acute')
    print(f"Updating site: {site}/{code}")
    site.update(request.form)
    # Re-assert protected values after the bulk merge.
    protected = {'site': site.key.name, 'acute': preserved_acute, 'code': code}
    for field, value in protected.items():
        site[field] = value
    print(f"Updating site {site}")
    client.put(site)
def publish_update(site):
    """Publish the updated site entity to the 'form-submissions' Pub/Sub
    topic so a downstream worker can refresh the Google Sheet."""
    # Publish a message to update the Google Sheet:
    message = {}
    message.update(site)
    # NOTE(review): naive local time shifted by +1h — looks like a hard-coded
    # UTC->BST adjustment; confirm, and prefer an explicit timezone.
    message['last_update'] = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%H:%M %d %B %y')
    publisher = pubsub_v1.PublisherClient()
    project_id = os.getenv("PROJECT_ID")
    topic_path = publisher.topic_path(project_id, 'form-submissions')
    data = json.dumps(message).encode("utf-8")
    future = publisher.publish(topic_path, data=data)
    # future.result() blocks until the publish is acknowledged.
    print(f"Published update to site {site.key.name}: {future.result()}")
| 32.066667 | 111 | 0.579151 |
acebcc603b7bc711abe4b7c2c1980aebe5f790af | 282 | py | Python | app/user/urls.py | alokKumarSingh89/recipe-app-api | 91c8ce21ad10e9099b039deceb2ef8ecea084374 | [
"MIT"
] | null | null | null | app/user/urls.py | alokKumarSingh89/recipe-app-api | 91c8ce21ad10e9099b039deceb2ef8ecea084374 | [
"MIT"
] | null | null | null | app/user/urls.py | alokKumarSingh89/recipe-app-api | 91c8ce21ad10e9099b039deceb2ef8ecea084374 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('user:create').
app_name = "user"
# Routes for user registration, token (login) and profile management.
urlpatterns = [
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/',views.CreateTokenView.as_view(),name='token'),
    path('me/', views.ManageViewUser.as_view(), name='me'),
]
| 21.692308 | 67 | 0.677305 |
acebce07d993d2407cba5abaf600d5f760f1a24a | 2,806 | py | Python | quizmake/grammar.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | 1 | 2021-06-21T21:51:44.000Z | 2021-06-21T21:51:44.000Z | quizmake/grammar.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | 8 | 2020-06-19T13:30:57.000Z | 2021-04-15T20:07:33.000Z | quizmake/grammar.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Grammar module for quizmake's question format(s).
This was mostly an exercise in using pyparsing.
"""
from pyparsing import (
CaselessLiteral,
Group,
Literal,
OneOrMore,
ParserElement,
Optional,
ParseResults,
Regex,
restOfLine,
ZeroOrMore,
)
def parse_file(filename: str) -> ParseResults:
    """Parse a question file using the grammar.

    :param filename: path of the question file to parse
    :return: pyparsing ParseResults produced by the ``bnf`` grammar
        defined at the bottom of this module
    """
    return bnf.parseFile(filename)
# This makes sure nothing is whitespace
ParserElement.setDefaultWhitespaceChars("")
newline = Literal("\n")
comment_line = Literal("//") + restOfLine + newline
content_line = Regex("[^[].*") + newline.suppress()
"""
Multiple choice question
"""
mc_question_header = CaselessLiteral("[multiple_choice]") + newline.suppress()
mc_question_section = mc_question_header + OneOrMore(content_line)
mc_answer_header = CaselessLiteral("[answer]") + newline.suppress()
mc_answer_section = mc_answer_header + OneOrMore(content_line)
mc_feedback_header = CaselessLiteral("[feedback]") + newline.suppress()
mc_feedback_section = mc_feedback_header + OneOrMore(content_line)
multiple_choice_q = (
ZeroOrMore(comment_line).suppress() + Group(mc_question_section) + Group(mc_answer_section) + Group(mc_feedback_section)
)
"""
Short answer question
"""
sa_question_header = CaselessLiteral("[short_answer]") + newline.suppress()
sa_question_section = sa_question_header + OneOrMore(content_line)
sa_answer_header = CaselessLiteral("[answer]") + newline.suppress()
sa_answer_section = sa_answer_header + OneOrMore(content_line)
short_answer_q = (
ZeroOrMore(comment_line).suppress() + Group(sa_question_section) + Group(sa_answer_section)
)
"""
True-False question
"""
true_false_q = Literal("Not implemented")
"""
Matching question
"""
m_question_header = CaselessLiteral("[matching]") + newline.suppress()
m_question_section = m_question_header + OneOrMore(content_line)
m_left_answer = CaselessLiteral("[left_match]") + newline.suppress()
m_left_section = m_left_answer + OneOrMore(content_line)
m_right_answer = CaselessLiteral("[right_match]") + newline.suppress()
m_right_section = m_right_answer + OneOrMore(content_line)
matching_q = (
ZeroOrMore(comment_line).suppress()
+ Group(m_question_section)
+ Group(m_left_section)
+ Group(m_right_section)
)
"""
Numerical question
"""
numerical_q = Literal("Not implemented")
"""
Essay question (free-form)
"""
essay_q = Literal("Not implemented")
"""
Description (not a question)
"""
description_q = Literal("Not implemented")
"""
Cloze question
"""
cloze_q = Literal("Not implemented")
question = (
multiple_choice_q
| short_answer_q
| true_false_q
| matching_q
| numerical_q
| essay_q
| description_q
| cloze_q
)
bnf = question
| 22.813008 | 124 | 0.734854 |
acebce1cdde76640d19da0d3fa5d2844b84928ad | 239 | py | Python | Python/basic-py/exercise_16.py | nhutnamhcmus/code | 22b528084ed234fcabca89cf1ba02a2c347007bc | [
"MIT"
] | 1 | 2020-10-12T18:33:22.000Z | 2020-10-12T18:33:22.000Z | Python/basic-py/exercise_16.py | nhutnamhcmus/code | 22b528084ed234fcabca89cf1ba02a2c347007bc | [
"MIT"
] | null | null | null | Python/basic-py/exercise_16.py | nhutnamhcmus/code | 22b528084ed234fcabca89cf1ba02a2c347007bc | [
"MIT"
] | null | null | null | import math
def calculateSinCosTan(x):
    """Return [sin(x), cos(x), tan(x)] for the angle *x* in radians."""
    return [fn(x) for fn in (math.sin, math.cos, math.tan)]
# Demo output. NOTE(review): each call prints the whole [sine, cos, tan]
# triple, so the labels below reflect only the angle used, not a single
# trig function.
print("sine:", calculateSinCosTan(-1))
print("cos:", calculateSinCosTan(0))
print("tan:", calculateSinCosTan(1))
acebce332177cda248e7ec0b3a1730ba98f9e7c9 | 6,561 | py | Python | websockets/client.py | RojavaCrypto/websockets | cd8ab6610d11e5210744d02a9a7615868c17de0e | [
"BSD-3-Clause"
] | null | null | null | websockets/client.py | RojavaCrypto/websockets | cd8ab6610d11e5210744d02a9a7615868c17de0e | [
"BSD-3-Clause"
] | 4 | 2020-06-05T21:50:11.000Z | 2021-06-10T21:43:28.000Z | virtual/lib/python3.6/site-packages/websockets/client.py | CollinsMuiruri/IS-PROJECT | 2e59bb95a6dc3483e699140bde6792f6e92e1356 | [
"Unlicense"
] | 1 | 2021-09-06T00:06:01.000Z | 2021-09-06T00:06:01.000Z | """
The :mod:`websockets.client` module defines a simple WebSocket client API.
"""
import asyncio
import collections.abc
import email.message
from .exceptions import InvalidHandshake
from .handshake import build_request, check_response
from .http import USER_AGENT, read_response
from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol
from .uri import parse_uri
__all__ = ['connect', 'WebSocketClientProtocol']
class WebSocketClientProtocol(WebSocketCommonProtocol):
    """
    Complete WebSocket client implementation as an :class:`asyncio.Protocol`.
    This class inherits most of its methods from
    :class:`~websockets.protocol.WebSocketCommonProtocol`.
    """
    # Distinguishes the client from the server side in the shared protocol
    # code inherited from WebSocketCommonProtocol.
    is_client = True
    # The connection starts in CONNECTING until handshake() completes.
    state = CONNECTING
    @asyncio.coroutine
    def handshake(self, wsuri,
                  origin=None, subprotocols=None, extra_headers=None):
        """
        Perform the client side of the opening handshake.
        If provided, ``origin`` sets the Origin HTTP header.
        If provided, ``subprotocols`` is a list of supported subprotocols in
        order of decreasing preference.
        If provided, ``extra_headers`` sets additional HTTP request headers.
        It must be a mapping or an iterable of (name, value) pairs.
        """
        headers = []
        set_header = lambda k, v: headers.append((k, v))
        # Omit the port from the Host header when it is the scheme default
        # (443 for wss, 80 for ws).
        if wsuri.port == (443 if wsuri.secure else 80):  # pragma: no cover
            set_header('Host', wsuri.host)
        else:
            set_header('Host', '{}:{}'.format(wsuri.host, wsuri.port))
        if origin is not None:
            set_header('Origin', origin)
        if subprotocols is not None:
            set_header('Sec-WebSocket-Protocol', ', '.join(subprotocols))
        if extra_headers is not None:
            # Accept both mappings and iterables of (name, value) pairs.
            if isinstance(extra_headers, collections.abc.Mapping):
                extra_headers = extra_headers.items()
            for name, value in extra_headers:
                set_header(name, value)
        set_header('User-Agent', USER_AGENT)
        # build_request appends the Upgrade headers and returns the
        # Sec-WebSocket-Key used to validate the server response below.
        key = build_request(set_header)
        self.request_headers = email.message.Message()
        for name, value in headers:
            self.request_headers[name] = value
        self.raw_request_headers = headers
        # Send handshake request. Since the URI and the headers only contain
        # ASCII characters, we can keep this simple.
        request = ['GET %s HTTP/1.1' % wsuri.resource_name]
        request.extend('{}: {}'.format(k, v) for k, v in headers)
        request.append('\r\n')
        request = '\r\n'.join(request).encode()
        self.writer.write(request)
        # Read handshake response.
        try:
            status_code, headers = yield from read_response(self.reader)
        except ValueError as exc:
            raise InvalidHandshake("Malformed HTTP message") from exc
        if status_code != 101:
            raise InvalidHandshake("Bad status code: {}".format(status_code))
        self.response_headers = headers
        self.raw_response_headers = list(headers.raw_items())
        # Validate Sec-WebSocket-Accept against the key generated above.
        get_header = lambda k: headers.get(k, '')
        check_response(get_header, key)
        # The server may select at most one of the offered subprotocols.
        self.subprotocol = headers.get('Sec-WebSocket-Protocol', None)
        if (self.subprotocol is not None and
                self.subprotocol not in subprotocols):
            raise InvalidHandshake(
                "Unknown subprotocol: {}".format(self.subprotocol))
        assert self.state == CONNECTING
        self.state = OPEN
        # Unblock any coroutine waiting for the handshake to finish.
        self.opening_handshake.set_result(True)
@asyncio.coroutine
def connect(uri, *,
            klass=WebSocketClientProtocol,
            timeout=10, max_size=2 ** 20, max_queue=2 ** 5,
            loop=None, legacy_recv=False,
            origin=None, subprotocols=None, extra_headers=None,
            **kwds):
    """
    This coroutine connects to a WebSocket server at a given ``uri``.
    It yields a :class:`WebSocketClientProtocol` which can then be used to
    send and receive messages.
    :func:`connect` is a wrapper around the event loop's
    :meth:`~asyncio.BaseEventLoop.create_connection` method. Extra keyword
    arguments are passed to :meth:`~asyncio.BaseEventLoop.create_connection`.
    For example, you can set the ``ssl`` keyword argument to a
    :class:`~ssl.SSLContext` to enforce some TLS settings. When connecting to
    a ``wss://`` URI, if this argument isn't provided explicitly, it's set to
    ``True``, which means Python's default :class:`~ssl.SSLContext` is used.
    The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional
    arguments is described the documentation of
    :class:`~websockets.protocol.WebSocketCommonProtocol`.
    :func:`connect` also accepts the following optional arguments:
    * ``origin`` sets the Origin HTTP header
    * ``subprotocols`` is a list of supported subprotocols in order of
      decreasing preference
    * ``extra_headers`` sets additional HTTP request headers – it can be a
      mapping or an iterable of (name, value) pairs
    :func:`connect` raises :exc:`~websockets.uri.InvalidURI` if ``uri`` is
    invalid and :exc:`~websockets.handshake.InvalidHandshake` if the opening
    handshake fails.
    On Python 3.5, :func:`connect` can be used as a asynchronous context
    manager. In that case, the connection is closed when exiting the context.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    wsuri = parse_uri(uri)
    if wsuri.secure:
        # wss:// implies TLS unless the caller supplied an explicit context.
        kwds.setdefault('ssl', True)
    elif 'ssl' in kwds:
        raise ValueError("connect() received a SSL context for a ws:// URI. "
                         "Use a wss:// URI to enable TLS.")
    factory = lambda: klass(
        host=wsuri.host, port=wsuri.port, secure=wsuri.secure,
        timeout=timeout, max_size=max_size, max_queue=max_queue,
        loop=loop, legacy_recv=legacy_recv,
    )
    transport, protocol = yield from loop.create_connection(
        factory, wsuri.host, wsuri.port, **kwds)
    try:
        yield from protocol.handshake(
            wsuri, origin=origin, subprotocols=subprotocols,
            extra_headers=extra_headers)
    except Exception:
        # Don't leak the TCP connection when the opening handshake fails.
        yield from protocol.close_connection(force=True)
        raise
    return protocol
# On Python 3.5+, replace connect() with a wrapper that also works as an
# asynchronous context manager ("async with connect(...) as ws:").
try:
    from .py35.client import Connect
except (SyntaxError, ImportError):  # pragma: no cover
    # Older interpreters cannot even parse the py35 module; the plain
    # coroutine defined above remains the public connect().
    pass
else:
    Connect.__wrapped__ = connect
    # Copy over docstring to support building documentation on Python 3.5.
    Connect.__doc__ = connect.__doc__
    connect = Connect
| 36.049451 | 78 | 0.65676 |
acebcf1123c350b56dbf324819aa04ffe4c081f4 | 41 | py | Python | conf.py | devqueue/IG-DM-count | bf5d89c29be769f7de8321d46da367cb63c5d09e | [
"MIT"
] | 1 | 2020-12-22T09:58:13.000Z | 2020-12-22T09:58:13.000Z | conf.py | devqueue/IG-DM-count | bf5d89c29be769f7de8321d46da367cb63c5d09e | [
"MIT"
] | null | null | null | conf.py | devqueue/IG-DM-count | bf5d89c29be769f7de8321d46da367cb63c5d09e | [
"MIT"
] | null | null | null | USERNAME = 'shadycoffe'
PASSWORD = 'pass' | 20.5 | 23 | 0.731707 |
acebcf4a8668b54a65222cd1828c43a9dd7a8f42 | 3,335 | py | Python | tests/test_distributor.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | 1 | 2017-02-03T18:58:29.000Z | 2017-02-03T18:58:29.000Z | tests/test_distributor.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | null | null | null | tests/test_distributor.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import unittest
from twisted.internet import defer
from mock import Mock, patch
from synapse.util.distributor import Distributor
from synapse.util.async import run_on_reactor
class DistributorTestCase(unittest.TestCase):
    """Tests for synapse.util.distributor.Distributor signal dispatch."""
    def setUp(self):
        self.dist = Distributor()
    @defer.inlineCallbacks
    def test_signal_dispatch(self):
        # Firing a declared signal calls the observer with the fire() args.
        self.dist.declare("alert")
        observer = Mock()
        self.dist.observe("alert", observer)
        d = self.dist.fire("alert", 1, 2, 3)
        yield d
        self.assertTrue(d.called)
        observer.assert_called_with(1, 2, 3)
    @defer.inlineCallbacks
    def test_signal_dispatch_deferred(self):
        # The Deferred returned by fire() resolves only after every
        # observer's own Deferred has resolved.
        self.dist.declare("whine")
        d_inner = defer.Deferred()
        def observer():
            return d_inner
        self.dist.observe("whine", observer)
        d_outer = self.dist.fire("whine")
        self.assertFalse(d_outer.called)
        d_inner.callback(None)
        yield d_outer
        self.assertTrue(d_outer.called)
    @defer.inlineCallbacks
    def test_signal_catch(self):
        # By default a failing observer is logged (exactly one warning) and
        # does not prevent the remaining observers from running.
        self.dist.declare("alarm")
        # NOTE(review): "for i in 1, 2" inside a comprehension is
        # Python 2-only syntax; this file predates a py3 port.
        observers = [Mock() for i in 1, 2]
        for o in observers:
            self.dist.observe("alarm", o)
        observers[0].side_effect = Exception("Awoogah!")
        with patch(
            "synapse.util.distributor.logger", spec=["warning"]
        ) as mock_logger:
            d = self.dist.fire("alarm", "Go")
            yield d
            self.assertTrue(d.called)
        observers[0].assert_called_once_with("Go")
        observers[1].assert_called_once_with("Go")
        self.assertEquals(mock_logger.warning.call_count, 1)
        self.assertIsInstance(
            mock_logger.warning.call_args[0][0], str
        )
    @defer.inlineCallbacks
    def test_signal_catch_no_suppress(self):
        # Gut-wrenching
        self.dist.suppress_failures = False
        self.dist.declare("whail")
        class MyException(Exception):
            pass
        @defer.inlineCallbacks
        def observer():
            yield run_on_reactor()
            raise MyException("Oopsie")
        self.dist.observe("whail", observer)
        d = self.dist.fire("whail")
        # With suppression off, the observer's failure propagates to the
        # Deferred returned by fire().
        yield self.assertFailure(d, MyException)
        self.dist.suppress_failures = True
    @defer.inlineCallbacks
    def test_signal_prereg(self):
        # Observers may be registered before the signal is declared.
        observer = Mock()
        self.dist.observe("flare", observer)
        self.dist.declare("flare")
        yield self.dist.fire("flare", 4, 5)
        observer.assert_called_with(4, 5)
    def test_signal_undeclared(self):
        # Firing a signal that was never declared raises KeyError.
        def code():
            self.dist.fire("notification")
        self.assertRaises(KeyError, code)
| 27.336066 | 74 | 0.638381 |
acebcf8d9ef8c2c185d90cb6ac51d3c55b302679 | 1,233 | py | Python | docs/conf.py | MandyZ1998/get_ittf_info | 73bb52f99558fb4cc5d8b249bf4a766fe3c3a8c3 | [
"MIT"
] | null | null | null | docs/conf.py | MandyZ1998/get_ittf_info | 73bb52f99558fb4cc5d8b249bf4a766fe3c3a8c3 | [
"MIT"
] | null | null | null | docs/conf.py | MandyZ1998/get_ittf_info | 73bb52f99558fb4cc5d8b249bf4a766fe3c3a8c3 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = u"get_ittf_info"
copyright = u"2021, Mengdi Zhang"
author = u"Mengdi Zhang"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "myst_nb",
    "autoapi.extension",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]
# sphinx-autoapi scans this directory and generates API pages automatically.
autoapi_dirs = ["../src"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
acebd032141165ef3d329432c9d13e236151a516 | 2,690 | py | Python | mdingestion/reader/dc.py | cehbrecht/md-ingestion | 895ae4ce36c54662301541f58a1fd3a900e0aeea | [
"Apache-2.0"
] | 4 | 2016-01-04T11:15:07.000Z | 2021-08-12T05:23:51.000Z | mdingestion/reader/dc.py | cehbrecht/md-ingestion | 895ae4ce36c54662301541f58a1fd3a900e0aeea | [
"Apache-2.0"
] | 19 | 2015-06-04T10:02:53.000Z | 2020-09-21T12:46:00.000Z | mdingestion/reader/dc.py | cehbrecht/md-ingestion | 895ae4ce36c54662301541f58a1fd3a900e0aeea | [
"Apache-2.0"
] | 7 | 2018-02-07T15:34:10.000Z | 2021-03-25T09:08:22.000Z | import shapely
from .base import XMLReader
from ..sniffer import OAISniffer
class DublinCoreReader(XMLReader):
    """Reader that maps OAI Dublin Core XML records onto the community doc fields."""

    SNIFFER = OAISniffer

    def parse(self, doc):
        """Populate *doc* with the metadata extracted from the parsed DC record."""
        doc.title = self.find('title')
        doc.description = self.find('description')
        doc.keywords = self.find('subject')
        doc.discipline = self.discipline(doc)
        doc.doi = self.find_doi('metadata.identifier')
        doc.pid = self.find_pid('metadata.identifier')
        doc.source = self.find_source('metadata.identifier')
        doc.related_identifier = self.related_identifier()
        doc.creator = self.find('creator')
        doc.publisher = self.find('publisher')
        doc.contributor = self.find('contributor')
        doc.publication_year = self.find('date')
        doc.rights = self.find('rights')
        # DC has no dedicated contact element; fall back to the publisher.
        doc.contact = doc.publisher
        doc.language = self.find('language')
        doc.resource_type = self.find('type')
        doc.format = self.find('format')
        # doc.temporal_coverage_begin = ''
        # doc.temporal_coverage_end = ''
        doc.geometry = self.find_geometry()
        doc.places = self.places()
        doc.size = self.find('extent')
        doc.version = self.find('hasVersion')

    def related_identifier(self):
        """Collect related identifiers from <relation> and <source> elements."""
        urls = self.find('relation')
        urls.extend(self.find('source'))
        return urls

    def places(self):
        """Free-text place names: <spatial> elements WITHOUT attributes.

        Attributed <spatial> elements encode coordinates and are handled
        by geometry() instead.
        """
        places = [s.text.strip() for s in self.parser.doc.find_all('spatial') if not s.attrs]
        return places

    def geometry(self):
        """Build a shapely geometry from a typed <spatial> element, if any."""
        if self.parser.doc.find('spatial', attrs={'xsi:type': 'dcterms:POINT'}):
            # <dcterms:spatial xsi:type="dcterms:POINT">9.811246,56.302585</dcterms:spatial>
            point = self.parser.doc.find('spatial', attrs={'xsi:type': 'dcterms:POINT'}).text.split(',')
            lon = float(point[0])
            lat = float(point[1])
            # point: x=lon, y=lat
            geometry = shapely.geometry.Point(lon, lat)
        elif self.parser.doc.find('spatial', attrs={'xsi:type': 'DCTERMS:Box'}):
            # <dc:coverage>North 37.30134, South 37.2888, East -32.275618, West -32.27982</dc:coverage>
            # <dcterms:spatial xsi:type="DCTERMS:Box">37.2888 -32.27982 37.30134 -32.275618</dcterms:spatial>
            # Per the example above, the four numbers are south/west/north/east
            # (West = -32.27982 is the second value). The previous code
            # assigned east = bbox[1] and west = bbox[3], which swapped the
            # longitudes and produced an inverted box.
            bbox = self.parser.doc.find('spatial', attrs={'xsi:type': 'DCTERMS:Box'}).text.split()
            south = float(bbox[0])
            west = float(bbox[1])
            north = float(bbox[2])
            east = float(bbox[3])
            # bbox: minx=west, miny=south, maxx=east, maxy=north
            geometry = shapely.geometry.box(west, south, east, north)
        else:
            geometry = None
        return geometry
| 41.384615 | 109 | 0.604461 |
acebd109b0ecab3ff45041c1f8202b9acad2db5f | 188 | py | Python | 4.Deep_Learning/test/pool-dims.py | NARUTONBM/Uda-machine-learning | e44d68b0f0aa0b8046330d07bc76a1b13fc9d99a | [
"MIT"
] | 1 | 2021-09-08T02:55:34.000Z | 2021-09-08T02:55:34.000Z | 4.Deep_Learning/test/pool-dims.py | NARUTONBM/Uda-machine-learning | e44d68b0f0aa0b8046330d07bc76a1b13fc9d99a | [
"MIT"
] | 7 | 2021-11-10T20:17:25.000Z | 2021-11-10T20:17:27.000Z | Deep.Learning/3.Convulutional-Networks/2.Convolutional-Neural-Networks/pool-dims.py | Scrier/udacity | 1326441aa2104a641b555676ec2429d8b6eb539f | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import MaxPooling2D
model = Sequential()
model.add(MaxPooling2D(pool_size=2, strides=2, input_shape=(100, 100, 15)))
model.summary()
| 26.857143 | 75 | 0.787234 |
acebd294f2f7b7b047fe46e8dcdb0d5c8210661e | 4,071 | py | Python | qcengine/programs/tests/test_standard_suite_ccsd(t).py | vivacebelles/QCEngine | d9a033f66cbbd4476bd0848d08323988c2726531 | [
"BSD-3-Clause"
] | null | null | null | qcengine/programs/tests/test_standard_suite_ccsd(t).py | vivacebelles/QCEngine | d9a033f66cbbd4476bd0848d08323988c2726531 | [
"BSD-3-Clause"
] | null | null | null | qcengine/programs/tests/test_standard_suite_ccsd(t).py | vivacebelles/QCEngine | d9a033f66cbbd4476bd0848d08323988c2726531 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import qcelemental as qcel
import qcengine as qcng
from qcelemental.testing import compare_values
from qcengine import testing
@pytest.fixture
def h2o():
    # Water geometry string; per its own header: R(O-H)=0.958 A,
    # angle(H-O-H)=104.5 deg, coordinates in atomic units (bohr).
    smol = """
 # R=0.958 A=104.5
 H 0.000000000000 1.431430901356 0.984293362719
 O 0.000000000000 0.000000000000 -0.124038860300
 H 0.000000000000 -1.431430901356 0.984293362719
 units au
"""
    return qcel.models.Molecule.from_data(smol)
@pytest.fixture
def nh2():
    # NH2 radical (charge 0, doublet — the "0 2" line); per its own header:
    # R(N-H)=1.008, angle=105.0 deg, coordinates in atomic units (bohr).
    smol = """
 # R=1.008 #A=105.0
 0 2
 N 0.000000000000000 0.000000000000000 -0.145912918634892
 H 0.000000000000000 -1.511214298139000 1.013682596946108
 H 0.000000000000000 1.511214298139000 1.013682596946108
 units au
"""
    return qcel.models.Molecule.from_data(smol)
# Each param is (program, basis, program-specific keywords); the marks skip
# the case when that QC program is not installed.
@pytest.mark.parametrize(
    "program,basis,keywords",
    [
        pytest.param("cfour", "aug-pvdz", {"scf_conv": 12, "cc_conv": 12}, marks=testing.using_cfour),
        pytest.param("cfour", "aug-pvdz", {}, marks=testing.using_cfour),
        pytest.param("nwchem", "aug-cc-pvdz", {"basis__spherical": True}, marks=testing.using_nwchem),
        pytest.param(
            "nwchem", "aug-cc-pvdz", {"basis__spherical": True, "qc_module": "tce"}, marks=testing.using_nwchem
        ),
        pytest.param("psi4", "aug-cc-pvdz", {}, marks=testing.using_psi4),
        pytest.param("gamess", "accd", {"ccinp__ncore": 0, "contrl__ispher": 1}, marks=testing.using_gamess),
    ],
)
def test_sp_ccsd_t_rhf_full(program, basis, keywords, h2o):
    """cfour/sp-rhf-ccsd/input.dat
    #! single point CCSD(T)/adz on water
    """
    resi = {"molecule": h2o, "driver": "energy", "model": {"method": "ccsd(t)", "basis": basis}, "keywords": keywords}
    res = qcng.compute(resi, program, raise_error=True, return_dict=True)
    assert res["driver"] == "energy"
    assert "provenance" in res
    assert res["success"] is True
    # aug-cc-pvdz
    # Reference all-electron CCSD(T) total energy, shared by every program.
    ccsd_t_tot = -76.276030676767
    atol = 1.0e-6
    assert compare_values(ccsd_t_tot, res["return_result"], atol=atol)
# These programs cannot run CCSD(T) on a UHF reference; assert that the
# request fails with the program's characteristic error message.
@pytest.mark.parametrize(
    "program,basis,keywords,errmsg",
    [
        pytest.param(
            "nwchem",
            "aug-cc-pvdz",
            {"ccsd__freeze": 1, "scf__uhf": True},
            "ccsd: nopen is not zero",
            marks=testing.using_nwchem,
        ),
        pytest.param(
            "gamess",
            "accd",
            {"contrl__scftyp": "uhf"},
            "CCTYP IS PROGRAMMED ONLY FOR SCFTYP=RHF OR ROHF",
            marks=testing.using_gamess,
        ),
    ],
)
def test_sp_ccsd_t_uhf_fc_error(program, basis, keywords, nh2, errmsg):
    resi = {"molecule": nh2, "driver": "energy", "model": {"method": "ccsd(t)", "basis": basis}, "keywords": keywords}
    # raise_error=True surfaces the program failure as an InputError.
    with pytest.raises(qcng.exceptions.InputError) as e:
        qcng.compute(resi, program, raise_error=True, return_dict=True)
    assert errmsg in str(e.value)
# ROHF-reference CCSD(T) on the NH2 radical; the nwchem/tce case is
# commented out (not yet working), see the inline line below.
@pytest.mark.parametrize(
    "program,basis,keywords",
    [
        pytest.param(
            "cfour",
            "aug-pvdz",
            {"reference": "rohf", "occupation": [[3, 1, 1, 0], [3, 0, 1, 0]], "scf_conv": 12, "cc_conv": 12},
            marks=testing.using_cfour,
        ),
        pytest.param("cfour", "aug-pvdz", {"reference": "rohf"}, marks=testing.using_cfour),
        # pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'qc_module': 'tce', 'scf__rohf': True}, marks=testing.using_nwchem),
        pytest.param("psi4", "aug-cc-pvdz", {"reference": "rohf"}, marks=testing.using_psi4),
    ],
)
def test_sp_ccsd_t_rohf_full(program, basis, keywords, nh2):
    resi = {"molecule": nh2, "driver": "energy", "model": {"method": "ccsd(t)", "basis": basis}, "keywords": keywords}
    res = qcng.compute(resi, program, raise_error=True, return_dict=True)
    assert res["driver"] == "energy"
    assert "provenance" in res
    assert res["success"] is True
    # aug-cc-pvdz
    # Reference all-electron ROHF-CCSD(T) total energy for NH2.
    ccsd_t_tot = -55.752861467462
    atol = 1.0e-6
    assert compare_values(ccsd_t_tot, res["return_result"], atol=atol)
| 33.097561 | 143 | 0.618521 |
acebd3b297e8987ad48e0f0127cfc3fb864827ce | 133 | py | Python | GameDevPython&Pygame/gameobjects/__init__.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | GameDevPython&Pygame/gameobjects/__init__.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | GameDevPython&Pygame/gameobjects/__init__.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | __all__ = [
'vector2',
'vector3',
'util',
'sphere',
'matrix44',
'color',
'gametime',
'gameobjects',
'grid'
]
__version__ = "0.0.3"
| 8.866667 | 21 | 0.609023 |
acebd49cd5207af0039dcfdfdb8bfdca0fe8b03b | 1,307 | py | Python | lab/lab12/tests/partial-sums.py | AnthonyNg404/61A | 6b8fc656ef5438dc45e58d49b025bc653dda8655 | [
"Unlicense"
] | 8 | 2020-07-28T11:10:49.000Z | 2021-05-29T15:27:17.000Z | 31-Aggregation/lab11/lab11/tests/partial-sums.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | [
"MIT"
] | null | null | null | 31-Aggregation/lab11/lab11/tests/partial-sums.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | [
"MIT"
] | 1 | 2020-10-23T08:15:08.000Z | 2020-10-23T08:15:08.000Z | test = {
'name': 'partial-sums',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> (define twos (cons-stream 2 twos))
twos
scm> (define evens (partial-sums twos))
evens
scm> (car evens)
2
scm> (car (cdr-stream evens))
4
scm> (car (cdr-stream (cdr-stream evens)))
6
scm> (define quadratic (partial-sums evens))
quadratic
scm> (car quadratic)
2
scm> (car (cdr-stream quadratic))
6
scm> (car (cdr-stream (cdr-stream quadratic)))
12
scm> (define finite (cons-stream 1 (cons-stream 2 (cons-stream 3 nil))))
finite
scm> (define finite-sums (partial-sums finite))
finite-sums
scm> (car finite-sums)
1
scm> (car (cdr-stream finite-sums))
3
scm> (car (cdr-stream (cdr-stream finite-sums)))
6
scm> (cdr-stream (cdr-stream (cdr-stream finite-sums)))
()
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
scm> (load-all ".")
""",
'teardown': '',
'type': 'scheme'
}
]
}
| 24.660377 | 82 | 0.441469 |
acebd4c9d96ff79603db9dc468169eb99f78649d | 2,503 | py | Python | femsnek/fields/field.py | szynka12/fem-snek | af9b0a50bfc86e73e103cba11241d7e67fbc82af | [
"MIT"
] | 3 | 2019-04-24T21:43:04.000Z | 2019-04-26T10:30:06.000Z | femsnek/fields/field.py | szynka12/fem-snek | af9b0a50bfc86e73e103cba11241d7e67fbc82af | [
"MIT"
] | null | null | null | femsnek/fields/field.py | szynka12/fem-snek | af9b0a50bfc86e73e103cba11241d7e67fbc82af | [
"MIT"
] | null | null | null | """
IGNORE: -----------------------------------------------------------
____ __
/ __/___ ____ ___ _____ ____ ___ / /__
/ /_ / _ \ / __ `__ \ ______ / ___// __ \ / _ \ / //_/
/ __// __// / / / / //_____/(__ )/ / / // __// ,<
/_/ \___//_/ /_/ /_/ /____//_/ /_/ \___//_/|_|
~~~~~~~~~ Finite element method python package ~~~~~~~~~
------------------------------------------------------------ IGNORE
.. module:: field
:synopsis: Module providing basic concepts for all field classes
.. moduleauthor:: Wojciech Sadowski <wojciech1sadowski@gmail.com>
"""
from abc import ABC, abstractmethod
from femsnek.fio.error import FieldOperationError
from femsnek.mesh import feMesh
class FieldBase(ABC):
    """Abstract base class for all field types.

    Holds the name, region, interpolation order and mesh reference shared
    by every field, plus compatibility checks used by field operations.
    """
    # _name: field name; _region: 2-tuple, (None, None) means "no region"
    # (treated as compatible with anything in region_check);
    # _order: interpolation order; _ref_feMesh: mesh the field lives on.
    __slots__ = ('_name',
                 '_region',
                 '_order',
                 '_ref_feMesh')
    def __init__(self):
        # All attributes start unset; subclasses are expected to fill them.
        self._name = None
        self._region = (None, None)
        self._order = None
        self._ref_feMesh = None
    @property
    @abstractmethod
    def components(self):
        """
        Decomposes field into scalar fields
        """
        pass
    def name(self) -> str:
        """
        Returns name of the field.
        :return: name of the field
        """
        return self._name
    def region(self) -> (str, int):
        """
        Returns region tuple.
        :return: region tuple
        """
        return self._region
    def order(self) -> int:
        # Interpolation order of the field.
        return self._order
    def region_check(self, field) -> None:
        """
        Raises FieldOperationError() when fields have different regions
        :param field: second operand field
        :type field: FieldBase
        """
        # A (None, None) region is region-agnostic and always compatible.
        if field.region() != (None, None) and field.region() != self._region:
            raise FieldOperationError('Cant operate on ' + str(type(self)) + ' and ' + str(type(field)) + '!')
    def order_check(self, field) -> None:
        """
        Raises FieldOperationError() when fields have different orders
        :param field: second operand field
        :type field: FieldBase
        """
        if self._order != field.order():
            raise FieldOperationError('Cant operate on ' + str(type(self)) + ' and ' + str(type(field)) + '!')
class UniformField(FieldBase):
    """A field with a single value, independent of position on the mesh."""

    @abstractmethod
    def expand(self, mesh: feMesh):
        """
        Expands uniform field onto the whole mesh.

        :param mesh: mesh to expand the uniform value onto
        :type mesh: feMesh

        NOTE(review): the original stacked ``@property`` on top of this
        abstract method, but a property getter cannot accept the ``mesh``
        argument, so the decorator was removed; concrete subclasses should
        implement this as a regular method.
        """
| 27.811111 | 110 | 0.512185 |
acebd5732c3dfc64bf8627758c66101b92140e3d | 8,523 | py | Python | tests/test_configuration.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | null | null | null | tests/test_configuration.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | 1 | 2019-01-26T15:05:23.000Z | 2019-01-26T15:05:23.000Z | tests/test_configuration.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | 1 | 2021-11-02T23:42:39.000Z | 2021-11-02T23:42:39.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import unicode_literals
import os
from collections import OrderedDict
import six
from airflow import configuration
from airflow.configuration import conf, AirflowConfigParser, parameterized_config
if six.PY2:
# Need `assertWarns` back-ported from unittest2
import unittest2 as unittest
else:
import unittest
class ConfTest(unittest.TestCase):
    """Tests for Airflow configuration handling.

    Covers environment-variable overrides, '%'-escaping, ``as_dict``
    export (sources, sensitive values, raw mode), command-backed
    (``*_cmd``) options, option removal, section export, and the
    deprecated-option fallback paths.
    """

    @classmethod
    def setUpClass(cls):
        # Env vars of the form AIRFLOW__<SECTION>__<KEY> override config
        # values; the second one verifies '%' survives the override path.
        os.environ['AIRFLOW__TESTSECTION__TESTKEY'] = 'testvalue'
        os.environ['AIRFLOW__TESTSECTION__TESTPERCENT'] = 'with%percent'
        configuration.load_test_config()
        # '%%' is the configparser escape for a literal '%'.
        conf.set('core', 'percent', 'with%%inside')

    @classmethod
    def tearDownClass(cls):
        # Clean up so the overrides do not leak into other test modules.
        del os.environ['AIRFLOW__TESTSECTION__TESTKEY']
        del os.environ['AIRFLOW__TESTSECTION__TESTPERCENT']

    def test_env_var_config(self):
        """Environment-variable overrides are readable through conf.get."""
        opt = conf.get('testsection', 'testkey')
        self.assertEqual(opt, 'testvalue')

        opt = conf.get('testsection', 'testpercent')
        self.assertEqual(opt, 'with%percent')

    def test_conf_as_dict(self):
        """as_dict picks up values and masks env-provided ones by default."""
        cfg_dict = conf.as_dict()

        # test that configs are picked up
        self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True')

        # '%%' stored in the config is unescaped to '%' on export.
        self.assertEqual(cfg_dict['core']['percent'], 'with%inside')

        # Env-var backed values are hidden unless display_sensitive is set.
        self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >')

    def test_conf_as_dict_source(self):
        """display_source tags each value with where it came from."""
        cfg_dict = conf.as_dict(display_source=True)
        self.assertEqual(
            cfg_dict['core']['load_examples'][1], 'airflow.cfg')
        self.assertEqual(
            cfg_dict['testsection']['testkey'], ('< hidden >', 'env var'))

    def test_conf_as_dict_sensitive(self):
        """display_sensitive exposes env-provided values instead of masking."""
        cfg_dict = conf.as_dict(display_sensitive=True)
        self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
        self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%percent')

        # display_source and display_sensitive combined.
        cfg_dict = conf.as_dict(display_sensitive=True, display_source=True)
        self.assertEqual(
            cfg_dict['testsection']['testkey'], ('testvalue', 'env var'))

    def test_conf_as_dict_raw(self):
        """raw=True keeps '%' characters escaped as '%%'."""
        cfg_dict = conf.as_dict(raw=True, display_sensitive=True)
        self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')

        # Values with '%' in them should be escaped
        self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%%percent')
        self.assertEqual(cfg_dict['core']['percent'], 'with%%inside')

    def test_command_config(self):
        """'*_cmd' options run their command and expose its stdout, but only
        for keys registered in as_command_stdout."""
        TEST_CONFIG = '''[test]
key1 = hello
key2_cmd = printf cmd_result
key3 = airflow
key4_cmd = printf key4_result
'''
        TEST_CONFIG_DEFAULT = '''[test]
key1 = awesome
key2 = airflow
[another]
key6 = value6
'''
        test_conf = AirflowConfigParser(
            default_config=parameterized_config(TEST_CONFIG_DEFAULT))
        test_conf.read_string(TEST_CONFIG)
        # Allow the *_cmd fallback for these keys only.
        test_conf.as_command_stdout = test_conf.as_command_stdout | {
            ('test', 'key2'),
            ('test', 'key4'),
        }

        self.assertEqual('hello', test_conf.get('test', 'key1'))
        self.assertEqual('cmd_result', test_conf.get('test', 'key2'))
        self.assertEqual('airflow', test_conf.get('test', 'key3'))
        self.assertEqual('key4_result', test_conf.get('test', 'key4'))
        self.assertEqual('value6', test_conf.get('another', 'key6'))

        self.assertTrue(test_conf.has_option('test', 'key1'))
        self.assertTrue(test_conf.has_option('test', 'key2'))
        self.assertTrue(test_conf.has_option('test', 'key3'))
        self.assertTrue(test_conf.has_option('test', 'key4'))
        self.assertFalse(test_conf.has_option('test', 'key5'))
        self.assertTrue(test_conf.has_option('another', 'key6'))

        # The synthetic 'key2' appears in the dict; the raw '_cmd' key does not.
        cfg_dict = test_conf.as_dict(display_sensitive=True)
        self.assertEqual('cmd_result', cfg_dict['test']['key2'])
        self.assertNotIn('key2_cmd', cfg_dict['test'])

    def test_remove_option(self):
        """remove_option can keep or drop the default-config fallback."""
        TEST_CONFIG = '''[test]
key1 = hello
key2 = airflow
'''
        TEST_CONFIG_DEFAULT = '''[test]
key1 = awesome
key2 = airflow
'''
        test_conf = AirflowConfigParser(
            default_config=parameterized_config(TEST_CONFIG_DEFAULT))
        test_conf.read_string(TEST_CONFIG)

        self.assertEqual('hello', test_conf.get('test', 'key1'))
        # Removing only the explicit value falls back to the default.
        test_conf.remove_option('test', 'key1', remove_default=False)
        self.assertEqual('awesome', test_conf.get('test', 'key1'))

        # Removing including the default drops the option entirely.
        test_conf.remove_option('test', 'key2')
        self.assertFalse(test_conf.has_option('test', 'key2'))

    def test_getsection(self):
        """getsection merges explicit, default and env-provided options."""
        TEST_CONFIG = '''
[test]
key1 = hello
'''
        TEST_CONFIG_DEFAULT = '''
[test]
key1 = awesome
key2 = airflow
[testsection]
key3 = value3
'''
        test_conf = AirflowConfigParser(
            default_config=parameterized_config(TEST_CONFIG_DEFAULT))
        test_conf.read_string(TEST_CONFIG)

        self.assertEqual(
            OrderedDict([('key1', 'hello'), ('key2', 'airflow')]),
            test_conf.getsection('test')
        )
        # 'testsection' also receives the env-var overrides from setUpClass.
        self.assertEqual(
            OrderedDict([
                ('key3', 'value3'),
                ('testkey', 'testvalue'),
                ('testpercent', 'with%percent')]),
            test_conf.getsection('testsection')
        )

    def test_broker_transport_options(self):
        """getsection coerces values to int/bool/float/str as appropriate."""
        section_dict = conf.getsection("celery_broker_transport_options")
        # assertIsInstance gives clearer failure messages than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(section_dict['visibility_timeout'], int)
        self.assertIsInstance(section_dict['_test_only_bool'], bool)
        self.assertIsInstance(section_dict['_test_only_float'], float)
        self.assertIsInstance(section_dict['_test_only_string'], six.string_types)

    def test_deprecated_options(self):
        """Reading a new-style key falls back to its deprecated name, with a
        DeprecationWarning, via both the env-var and conf.set paths."""
        # Guarantee we have a deprecated setting, so we test the deprecation
        # lookup even if we remove this explicit fallback
        conf.deprecated_options['celery'] = {
            'worker_concurrency': 'celeryd_concurrency',
        }

        # Remove it so we are sure we use the right setting
        conf.remove_option('celery', 'worker_concurrency')

        with self.assertWarns(DeprecationWarning):
            os.environ['AIRFLOW__CELERY__CELERYD_CONCURRENCY'] = '99'
            self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
            os.environ.pop('AIRFLOW__CELERY__CELERYD_CONCURRENCY')

        with self.assertWarns(DeprecationWarning):
            conf.set('celery', 'celeryd_concurrency', '99')
            self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
            conf.remove_option('celery', 'celeryd_concurrency')

    def test_deprecated_options_cmd(self):
        """The deprecated-name fallback also resolves *_cmd options."""
        # Guarantee we have a deprecated setting, so we test the deprecation
        # lookup even if we remove this explicit fallback
        conf.deprecated_options['celery'] = {'result_backend': 'celery_result_backend'}
        conf.as_command_stdout.add(('celery', 'celery_result_backend'))

        conf.remove_option('celery', 'result_backend')
        conf.set('celery', 'celery_result_backend_cmd', '/bin/echo 99')

        with self.assertWarns(DeprecationWarning):
            tmp = None
            if 'AIRFLOW__CELERY__RESULT_BACKEND' in os.environ:
                tmp = os.environ.pop('AIRFLOW__CELERY__RESULT_BACKEND')
            self.assertEqual(conf.getint('celery', 'result_backend'), 99)
            # Compare against None, not truthiness, so an empty-string env
            # var is restored rather than silently dropped.
            if tmp is not None:
                os.environ['AIRFLOW__CELERY__RESULT_BACKEND'] = tmp
| 37.056522 | 88 | 0.668544 |
acebd64f2c594753e9631c1a57b438ff7cc590f1 | 531 | py | Python | server/djangoapp/admin.py | chkua00/cloud-app-development-capstone | 792264f40033cfcd7314c9c9693ae79dde2f9895 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/admin.py | chkua00/cloud-app-development-capstone | 792264f40033cfcd7314c9c9693ae79dde2f9895 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/admin.py | chkua00/cloud-app-development-capstone | 792264f40033cfcd7314c9c9693ae79dde2f9895 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInLine(admin.StackedInline):
    """Stacked inline editor for CarModel rows; used by CarMakeAdmin so
    car models can be edited on their make's admin page."""
    model = CarModel
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
    """Admin configuration for CarModel; the change list shows only the
    'name' field."""
    list_display = ['name']
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
    """Admin configuration for CarMake with inline editing of its
    CarModel rows (via CarModelInLine)."""
    inlines = [CarModelInLine]
    list_display = ['name']
# Register both models with their customized ModelAdmin classes so they
# appear in the Django admin site.
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
| 23.086957 | 44 | 0.777778 |
acebd6d41c07585a94067c0af8c43e6180aaaf6f | 165 | py | Python | src/server.py | shreyasiitr/acr-build-helloworld-node | ad1dc9bcdd0c9958d83849a1fe45bc13accf69a0 | [
"MIT"
] | null | null | null | src/server.py | shreyasiitr/acr-build-helloworld-node | ad1dc9bcdd0c9958d83849a1fe45bc13accf69a0 | [
"MIT"
] | null | null | null | src/server.py | shreyasiitr/acr-build-helloworld-node | ad1dc9bcdd0c9958d83849a1fe45bc13accf69a0 | [
"MIT"
] | null | null | null | from flask import Flask
server = Flask(__name__)
@server.route("/")
def hello():
    """Handle GET / by returning a static greeting."""
    greeting = "Hello World!"
    return greeting
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside the
    # container/host, not just localhost.
    server.run(host='0.0.0.0')
acebd71134010da8089d5f7bfb565e3558acd2fe | 6,320 | py | Python | neko3/features/iss/__init__.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | neko3/features/iss/__init__.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | neko3/features/iss/__init__.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Ported from Neko v1. Plots the ISS's location on a map.
"""
import datetime
import enum
import io
import discord
# noinspection PyPep8Naming
import PIL.Image as image
# noinspection PyPep8Naming
import PIL.ImageDraw as draw
from discord.ext import commands
import neko3.cog
from neko3 import files
from neko3 import neko_commands
from neko3 import theme
def _plot(latitude, longitude):
    """Render the default world map with a red marker at the given position.

    :param latitude: latitude in degrees (vertical axis of the projection).
    :param longitude: longitude in degrees (horizontal axis).
    :return: the annotated PIL image of the projection.
    """
    mercator = MercatorProjection()
    x, y = mercator.swap_units(latitude, longitude, MapCoordinate.long_lat)
    x, y = int(x), int(y)
    pen = mercator.pen()
    # Draw a small filled red circle centred on the projected pixel.
    # (The old 5-pixel cross-plot implementation, previously kept here as a
    # dead string literal, has been removed.)
    pen.ellipse([(x - 4, y - 4), (x + 4, y + 4)], (255, 0, 0))
    return mercator.image
class MapCoordinate(enum.Enum):
    """Coordinate systems accepted by MercatorProjection.swap_units."""
    long_lat = enum.auto()  # longitude/latitude in degrees
    xy = enum.auto()  # pixel x/y on the projected map image
class MercatorProjection:
    """
    Wraps a PIL image in a mapping between pixel coordinates and
    longitude/latitude, assuming 0E,0N sits at the central pixel.

    :param map_image: the image object to use for the projection; when
        omitted, the bundled mercator bitmap is loaded.
    """

    def __init__(self, map_image: image.Image = None):
        """
        Create a projection over the given image (or the bundled default).
        """
        if map_image is None:
            map_image = image.open(files.in_here("mercator-small.png"))

        self.image = map_image
        # Pixel position of 0E,0N: the centre of the image.
        self.ox, self.oy = map_image.width / 2, map_image.height / 2
        # Horizontal scale: pixels per degree of longitude.
        self.dx = map_image.width / 360
        # Vertical scale: pixels per degree of latitude.
        self.dy = map_image.height / 180

    @property
    def width(self):
        """Width of the backing image, in pixels."""
        return self.image.width

    @property
    def height(self):
        """Height of the backing image, in pixels."""
        return self.image.height

    def swap_units(self, vertical, horizontal, input_measurement):
        """
        Convert between pixel X,Y and latitude/longitude.

        :param vertical: latitude (long_lat input) or y (xy input).
        :param horizontal: longitude (long_lat input) or x (xy input).
        :param input_measurement: the unit system the inputs are in.
        :return: (x, y) for long_lat input, or (lat, long) for xy input.
        :raises TypeError: for an unrecognised measurement.
        """
        if input_measurement is MapCoordinate.long_lat:
            return (horizontal * self.dx) + self.ox, self.oy - vertical * self.dy
        if input_measurement is MapCoordinate.xy:
            return (self.oy - vertical) / self.dy, (horizontal - self.ox) / self.dx
        raise TypeError("Unknown measurement")

    def duplicate(self):
        """Return an independent deep copy of this projection."""
        return MercatorProjection(self.image.copy())

    def pen(self) -> draw.ImageDraw:
        """Return an ImageDraw object bound to this projection's image."""
        return draw.ImageDraw(self.image)
class SpaceCog(neko3.cog.CogBase):
    """Cog providing the ``iss`` command: fetches the ISS position from
    wheretheiss.at and posts it plotted on a small world map."""

    async def plot(self, latitude, longitude, bytesio):
        """
        Plot a latitude/longitude on the default mercator projection and
        write the result as PNG into *bytesio*.

        :param latitude: the latitude, in degrees.
        :param longitude: the longitude, in degrees.
        :param bytesio: the bytes IO to dump PNG data to.
        """
        # _plot is CPU-bound (PIL drawing), so it is run off the event loop.
        img = await self.run_in_process_pool(_plot, [latitude, longitude])
        img.save(bytesio, "PNG")
        # Seek back to the start so the buffer can be read from position 0.
        bytesio.seek(0)

    @neko_commands.command(name="iss", aliases=["internationalspacestation"], brief="Shows you where the ISS is.")
    @commands.cooldown(1, 30, commands.BucketType.guild)
    async def iss_command(self, ctx):
        """
        Calculates where above the Earth's surface the ISS is currently,
        and plots it on a small map.
        """
        with ctx.channel.typing():
            # Plot the first point
            with io.BytesIO() as b:
                async with self.acquire_http_session() as http:
                    res = await http.request("GET", "https://api.wheretheiss.at/v1/satellites/25544")
                    data = await res.json()
                    # Coroutine is created here but awaited later, so the
                    # plot overlaps with building the embed below.
                    image_fut = self.plot(data["latitude"], data["longitude"], b)
                assert isinstance(data, dict), "I...I don't understand..."
                long = data["longitude"]
                lat = data["latitude"]
                # NOTE(review): fromtimestamp() converts to *local* time but
                # the embed labels it "UTC" — likely should be
                # utcfromtimestamp; confirm before relying on the display.
                time = datetime.datetime.fromtimestamp(data["timestamp"])
                altitude = data["altitude"]
                velocity = data["velocity"]
                # NOTE(review): is_day is computed but never used.
                is_day = data["visibility"] == "daylight"
                desc = "\n".join(
                    [
                        f"**Longitude**: {long:.3f}°E",
                        f'**Latitude**: {abs(lat):.3f}°{"N" if lat >= 0 else "S"}',
                        f"**Altitude**: {altitude:.3f} km",
                        f"**Velocity**: {velocity:.3f} km/h",
                        f"**Timestamp**: {time} UTC",
                    ]
                )
                embed = theme.generic_embed(
                    ctx=ctx,
                    title="International space station location",
                    description=desc,
                    url="http://www.esa.int/Our_Activities/Human_Spaceflight"
                    "/International_Space_Station"
                    "/Where_is_the_International_Space_Station ",
                )
                # Wait for the PNG render to finish, then attach it.
                await image_fut
                b.seek(0)
                file = discord.File(b, "iss.png")
                await ctx.send(file=file, embed=embed)
def setup(bot):
    """Attach the SpaceCog to the given bot instance."""
    cog = SpaceCog(bot)
    bot.add_cog(cog)
| 31.133005 | 114 | 0.575949 |
acebd789254bf3a1eac94b9630f69a3074f51f86 | 173 | py | Python | euler2.py | pysaquib/Project-Euler-Tasks | bda073c14a9183b5efdf80fda66b0fc39f55449a | [
"MIT"
] | null | null | null | euler2.py | pysaquib/Project-Euler-Tasks | bda073c14a9183b5efdf80fda66b0fc39f55449a | [
"MIT"
] | null | null | null | euler2.py | pysaquib/Project-Euler-Tasks | bda073c14a9183b5efdf80fda66b0fc39f55449a | [
"MIT"
] | null | null | null | # ProjectEuler
# Sum of even fibonacci terms that doesn't exceed 4 millions
f = 0
s = 1
tSum = 0
while(f<4000000):
t=f
f=s
s=s+t
if(f%2==0):
tSum = tSum+f
print(tSum)
| 13.307692 | 60 | 0.647399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.